/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/
1998-01-16 15:13:05 +03:00
2015-05-20 08:09:00 +03:00
# define rb_data_object_alloc rb_data_object_alloc
# define rb_data_typed_object_alloc rb_data_typed_object_alloc
2018-01-09 09:24:11 +03:00
# include "ruby/encoding.h"
2018-01-09 09:24:10 +03:00
# include "ruby/io.h"
2007-06-10 07:06:15 +04:00
# include "ruby/st.h"
# include "ruby/re.h"
2012-07-10 17:57:11 +04:00
# include "ruby/thread.h"
2008-03-03 11:27:43 +03:00
# include "ruby/util.h"
2013-05-27 01:30:44 +04:00
# include "ruby/debug.h"
2018-01-09 09:24:10 +03:00
# include "internal.h"
2008-03-12 08:47:10 +03:00
# include "eval_intern.h"
* blockinlining.c: remove "yarv" prefix.
* array.c, numeric.c: ditto.
* insnhelper.ci, insns.def, vm_evalbody.ci: ditto.
* yarvcore.c: removed.
* yarvcore.h: renamed to core.h.
* cont.c, debug.c, error.c, process.c, signal.c : ditto.
* ext/probeprofiler/probeprofiler.c: ditto.
* id.c, id.h: added.
* inits.c: ditto.
* compile.c: rename internal functions.
* compile.h: fix debug flag.
* eval.c, object.c, vm.c: remove ruby_top_self.
use rb_vm_top_self() instead.
* eval_intern.h, eval_load: ditto.
* gc.c: rename yarv_machine_stack_mark() to
rb_gc_mark_machine_stack().
* insnhelper.h: remove unused macros.
* iseq.c: add iseq_compile() to create iseq object
from source string.
* proc.c: rename a internal function.
* template/insns.inc.tmpl: remove YARV prefix.
* thread.c:
* vm.c (rb_iseq_eval): added.
* vm.c: move some functions from yarvcore.c.
* vm_dump.c: fix to remove compiler warning.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12741 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-07-12 08:25:46 +04:00
# include "vm_core.h"
2007-05-29 19:49:30 +04:00
# include "gc.h"
2010-10-26 21:27:32 +04:00
# include "constant.h"
2012-11-09 20:05:07 +04:00
# include "ruby_atomic.h"
* probes.d: add DTrace probe declarations. [ruby-core:27448]
* array.c (empty_ary_alloc, ary_new): added array create DTrace probe.
* compile.c (rb_insns_name): allowing DTrace probes to access
instruction sequence name.
* Makefile.in: translate probes.d file to appropriate header file.
* common.mk: declare dependencies on the DTrace header.
* configure.in: add a test for existence of DTrace.
* eval.c (setup_exception): add a probe for when an exception is
raised.
* gc.c: Add DTrace probes for mark begin and end, and sweep begin and
end.
* hash.c (empty_hash_alloc): Add a probe for hash allocation.
* insns.def: Add probes for function entry and return.
* internal.h: function declaration for compile.c change.
* load.c (rb_f_load): add probes for `load` entry and exit, require
entry and exit, and wrapping search_required for load path search.
* object.c (rb_obj_alloc): added a probe for general object creation.
* parse.y (yycompile0): added a probe around parse and compile phase.
* string.c (empty_str_alloc, str_new): DTrace probes for string
allocation.
* test/dtrace/*: tests for DTrace probes.
* vm.c (vm_invoke_proc): add probes for function return on exception
raise, hash create, and instruction sequence execution.
* vm_core.h: add probe declarations for function entry and exit.
* vm_dump.c: add probes header file.
* vm_eval.c (vm_call0_cfunc, vm_call0_cfunc_with_frame): add probe on
function entry and return.
* vm_exec.c: expose instruction number to instruction name function.
* vm_insnshelper.c: add function entry and exit probes for cfunc
methods.
* vm_insnhelper.h: vm usage information is always collected, so
uncomment the functions.
12 19:14:50 2012 Akinori MUSHA <knu@iDaemons.org>
* configure.in (isinf, isnan): isinf() and isnan() are macros on
DragonFly which cannot be found by AC_REPLACE_FUNCS(). This
workaround enforces the fact that they exist on DragonFly.
12 15:59:38 2012 Shugo Maeda <shugo@ruby-lang.org>
* vm_core.h (rb_call_info_t::refinements), compile.c (new_callinfo),
vm_insnhelper.c (vm_search_method): revert r37616 because it's too
slow. [ruby-dev:46477]
* test/ruby/test_refinement.rb (test_inline_method_cache): skip
the test until the bug is fixed efficiently.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@37631 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-11-13 01:52:12 +04:00
# include "probes.h"
2015-08-12 11:43:55 +03:00
# include "id_table.h"
1998-01-16 15:13:05 +03:00
# include <stdio.h>
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# include <stdarg.h>
1998-01-16 15:13:05 +03:00
# include <setjmp.h>
2001-11-19 17:42:45 +03:00
# include <sys/types.h>
2016-01-22 11:33:55 +03:00
# include "ruby_assert.h"
2017-05-24 09:46:44 +03:00
# include "debug_counter.h"
2018-10-31 00:53:56 +03:00
# include "transient_heap.h"
mjit.c: merge MJIT infrastructure
that allows to JIT-compile Ruby methods by generating C code and
using C compiler. See the first comment of mjit.c to know what this
file does.
mjit.c is authored by Vladimir Makarov <vmakarov@redhat.com>.
After he invented great method JIT infrastructure for MRI as MJIT,
Lars Kanis <lars@greiz-reinsdorf.de> sent the patch to support MinGW
in MJIT. In addition to merging it, I ported pthread to Windows native
threads. Now this MJIT infrastructure can be compiled on Visual Studio.
This commit simplifies mjit.c to decrease code at initial merge. For
example, this commit does not provide multiple JIT threads support.
We can resurrect them later if we really want them, but I wanted to minimize
diff to make it easier to review this patch.
`/tmp/_mjitXXX` file is renamed to `/tmp/_ruby_mjitXXX` because non-Ruby
developers may not know the name "mjit" and the file name should make
sure it's from Ruby and not from some harmful programs. TODO: it may be
better to store this to some temporary directory which Ruby is already using
by Tempfile, if it's not bad for performance.
mjit.h: New. It has `mjit_exec` interface similar to `vm_exec`, which is
for triggering MJIT. This drops interface for AOT compared to the original
MJIT.
Makefile.in: define macros to let MJIT know the path of MJIT header.
Probably we can refactor this to reduce the number of macros (TODO).
win32/Makefile.sub: ditto.
common.mk: compile mjit.o and mjit_compile.o. Unlike original MJIT, this
commit separates MJIT infrastructure and JIT compiler code as independent
object files. As initial patch is NOT going to have ultra-fast JIT compiler,
it's likely to replace JIT compiler, e.g. original MJIT's compiler or some
future JIT impelementations which are not public now.
inits.c: define MJIT module. This is added because `MJIT.enabled?` was
necessary for testing.
test/lib/zombie_hunter.rb: skip if `MJIT.enabled?`. Obviously this
wouldn't work with current code when JIT is enabled.
test/ruby/test_io.rb: skip this too. This would make no sense with MJIT.
ruby.c: define MJIT CLI options. As major difference from original MJIT,
"-j:l"/"--jit:llvm" are renamed to "--jit-cc" because I want to support
not only gcc/clang but also cl.exe (Visual Studio) in the future. But it
takes only "--jit-cc=gcc", "--jit-cc=clang" for now. And only long "--jit"
options are allowed since some Ruby committers preferred it at Ruby
developers Meeting on January, and some of options are renamed.
This file also triggers to initialize MJIT thread and variables.
eval.c: finalize MJIT worker thread and variables.
test/ruby/test_rubyoptions.rb: fix number of CLI options for --jit.
thread_pthread.c: change for pthread abstraction in MJIT. Prefix rb_ for
functions which are used by other files.
thread_win32.c: ditto, for Windows. Those pthread porting is one of major
works that YARV-MJIT created, which is my fork of MJIT, in Feature 14235.
thread.c: follow rb_ prefix changes
vm.c: trigger MJIT call on VM invocation. Also trigger `mjit_mark` to avoid
SEGV by race between JIT and GC of ISeq. The improvement was provided by
wanabe <s.wanabe@gmail.com>.
In JIT compiler I created and am going to add in my next commit, I found
that having `mjit_exec` after `vm_loop_start:` is harmful because the
JIT-ed function doesn't proceed other ISeqs on RESTORE_REGS of leave insn.
Executing non-FINISH frame is unexpected for my JIT compiler and
`exception_handler` triggers executions of such ISeqs. So `mjit_exec`
here should be executed only when it directly comes from `vm_exec` call.
`RubyVM::MJIT` module and `.enabled?` method is added so that we can skip
some tests which don't expect JIT threads or compiler file descriptors.
vm_insnhelper.h: trigger MJIT on method calls during VM execution.
vm_core.h: add fields required for mjit.c. `bp` must be `cfp[6]` because
rb_control_frame_struct is likely to be casted to another struct. The
last position is the safest place to add the new field.
vm_insnhelper.c: save initial value of cfp->ep as cfp->bp. This is an
optimization which are done in both MJIT and YARV-MJIT. So this change
is added in this commit. Calculating bp from ep is a little heavy work,
so bp is kind of cache for it.
iseq.c: notify ISeq GC to MJIT. We should know which iseq in MJIT queue
is GCed to avoid SEGV. TODO: unload some GCed units in some safe way.
gc.c: add hooks so that MJIT can wait GC, and vice versa. Simultaneous
JIT and GC executions may cause SEGV and so we should synchronize them.
cont.c: save continuation information in MJIT worker. As MJIT shouldn't
unload JIT-ed code which is being used, MJIT wants to know full list of
saved execution contexts for continuation and detect ISeqs in use.
mjit_compile.c: added empty JIT compiler so that you can reuse this commit
to build your own JIT compiler. This commit tries to compile ISeqs but
all of them are considered as not supported in this commit. So you can't
use JIT compiler in this commit yet while we added --jit option now.
Patch author: Vladimir Makarov <vmakarov@redhat.com>.
Contributors:
Takashi Kokubun <takashikkbn@gmail.com>.
wanabe <s.wanabe@gmail.com>.
Lars Kanis <lars@greiz-reinsdorf.de>.
Part of Feature 12589 and 14235.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62189 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 09:58:09 +03:00
# include "mjit.h"
2001-11-19 17:42:45 +03:00
2015-05-16 15:17:14 +03:00
# undef rb_data_object_wrap
2014-09-27 05:28:47 +04:00
2013-11-21 13:52:09 +04:00
/* Provide malloc_usable_size() on platforms that lack it, and pull in
 * the header that declares it where it exists. */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
#  include RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>   /* Darwin: declares malloc_size() */
# endif
#endif
2001-11-19 17:42:45 +03:00
# ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
# endif
1998-01-16 15:13:05 +03:00
2001-11-19 08:03:03 +03:00
# ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
# endif
2004-07-08 14:27:23 +04:00
# if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
2012-01-09 02:50:59 +04:00
# elif defined(HAVE_POSIX_MEMALIGN)
# elif defined(HAVE_MEMALIGN)
2012-01-07 19:13:37 +04:00
# include <malloc.h>
# endif
2008-03-31 21:58:41 +04:00
# define rb_setjmp(env) RUBY_SETJMP(env)
# define rb_jmp_buf rb_jmpbuf_t
1998-01-16 15:13:05 +03:00
2014-02-21 03:45:55 +04:00
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* Trick the compiler into thinking an external signal handler uses this,
 * so the store below (and the guarded pointer) cannot be optimized away. */
volatile VALUE rb_gc_guarded_val;

/* Store val into a volatile global and return ptr unchanged; used to keep
 * a VALUE visibly live on the stack across a call boundary. */
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;
    return ptr;
}
#endif
2013-11-23 07:33:10 +04:00
/* GC tuning parameters. Each may be overridden at build time; the values
 * below are the defaults (sizes in slots or bytes, factors as ratios). */
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#endif
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS  4096
#endif
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#endif
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 is disable */
#endif
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#endif

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO  0.20
#endif
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#endif
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO  0.65
#endif

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#endif
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#endif

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#endif
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
#endif

/* Debug-print switches; all off by default. */
#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#endif
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#endif
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#endif

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
#define TICK_TYPE 1
2011-10-21 17:02:19 +04:00
typedef struct {
2014-02-07 05:54:26 +04:00
size_t heap_init_slots ;
size_t heap_free_slots ;
2013-11-21 12:50:40 +04:00
double growth_factor ;
2014-02-07 05:54:26 +04:00
size_t growth_max_slots ;
2016-03-31 10:45:13 +03:00
double heap_free_slots_min_ratio ;
2016-03-31 12:16:48 +03:00
double heap_free_slots_goal_ratio ;
2016-03-31 10:45:13 +03:00
double heap_free_slots_max_ratio ;
2014-02-17 07:27:13 +04:00
double oldobject_limit_factor ;
2016-03-31 10:45:13 +03:00
2014-02-07 05:54:26 +04:00
size_t malloc_limit_min ;
size_t malloc_limit_max ;
2013-11-21 12:50:40 +04:00
double malloc_limit_growth_factor ;
2016-03-31 10:45:13 +03:00
2014-02-07 05:54:26 +04:00
size_t oldmalloc_limit_min ;
size_t oldmalloc_limit_max ;
2013-11-24 22:13:48 +04:00
double oldmalloc_limit_growth_factor ;
2016-03-31 10:45:13 +03:00
2013-06-19 02:45:41 +04:00
VALUE gc_stress ;
2011-10-21 17:02:19 +04:00
} ruby_gc_params_t ;
2013-11-21 12:50:40 +04:00
static ruby_gc_params_t gc_params = {
2013-11-23 07:33:10 +04:00
GC_HEAP_INIT_SLOTS ,
2014-03-24 09:28:03 +04:00
GC_HEAP_FREE_SLOTS ,
2013-09-27 12:01:14 +04:00
GC_HEAP_GROWTH_FACTOR ,
2013-11-23 07:33:10 +04:00
GC_HEAP_GROWTH_MAX_SLOTS ,
2016-03-31 10:45:13 +03:00
GC_HEAP_FREE_SLOTS_MIN_RATIO ,
2016-03-31 13:16:48 +03:00
GC_HEAP_FREE_SLOTS_GOAL_RATIO ,
2016-03-31 10:45:13 +03:00
GC_HEAP_FREE_SLOTS_MAX_RATIO ,
2014-02-17 07:27:13 +04:00
GC_HEAP_OLDOBJECT_LIMIT_FACTOR ,
2016-03-31 10:45:13 +03:00
2013-11-21 12:20:34 +04:00
GC_MALLOC_LIMIT_MIN ,
2013-09-27 12:01:14 +04:00
GC_MALLOC_LIMIT_MAX ,
2013-09-27 13:36:48 +04:00
GC_MALLOC_LIMIT_GROWTH_FACTOR ,
2016-03-31 10:45:13 +03:00
2013-11-24 22:13:48 +04:00
GC_OLDMALLOC_LIMIT_MIN ,
GC_OLDMALLOC_LIMIT_MAX ,
GC_OLDMALLOC_LIMIT_GROWTH_FACTOR ,
2016-03-31 10:45:13 +03:00
2011-10-21 17:02:19 +04:00
FALSE ,
} ;
2011-03-07 11:39:39 +03:00
2013-08-19 16:00:51 +04:00
/* GC_DEBUG:
 * Set nonzero to embed GC debugging information.
 * Overridable at build time; off by default.
 */
# ifndef GC_DEBUG
# define GC_DEBUG 0
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
/* RGENGC_DEBUG:
2013-05-22 03:09:22 +04:00
* 1 : basic information
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
* 2 : remember set operation
* 3 : mark
2013-05-14 05:54:48 +04:00
* 4 :
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
* 5 : sweep
*/
# ifndef RGENGC_DEBUG
2017-06-21 07:34:25 +03:00
# ifdef RUBY_DEVEL
# define RGENGC_DEBUG -1
# else
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# define RGENGC_DEBUG 0
# endif
2017-06-21 07:34:25 +03:00
# endif
2017-07-12 12:44:45 +03:00
# if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
2017-07-12 08:30:43 +03:00
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
# else
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
2017-06-19 17:36:18 +03:00
# endif
/* Runtime-adjustable RGenGC debug level; consulted by RGENGC_DEBUG_ENABLED()
 * only when RGENGC_DEBUG is negative. */
int ruby_rgengc_debug;
/* RGENGC_CHECK_MODE
 * 0: disable all assertions
 * 1: enable assertions (to debug RGenGC)
 * 2: enable internal consistency check at each GC (for debugging)
 * 3: enable internal consistency check at each GC steps (for debugging)
 * 4: enable liveness check
 * 5: show all references
 */
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#endif
/* GC-internal assertion: active only when consistency checks are compiled in
 * (RGENGC_CHECK_MODE > 0); otherwise it expands to a no-op. */
#if RGENGC_CHECK_MODE > 0
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
#else
#define GC_ASSERT(expr) ((void)0)
#endif

/* RGENGC_OLD_NEWOBJ_CHECK
 * 0:  disable all assertions.
 * >0: make a OLD object when new object creation.
 *
 * Make one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB protected objects creation.
 */
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0
#endif
/* RGENGC_PROFILE
 * 0: disable RGenGC profiling
 * 1: enable profiling for basic information
 * 2: enable profiling for each types
 */
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#endif
/* RGENGC_ESTIMATE_OLDMALLOC
 * Enable/disable to estimate increase size of malloc'ed size by old objects.
 * If estimation exceeds threshold, then will invoke full GC.
 * 0: disable estimation.
 * 1: enable estimation.
 */
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#endif

/* RGENGC_FORCE_MAJOR_GC
 * Force major/full GC if this macro is not 0.
 */
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#endif
#else /* USE_RGENGC */

/* RGenGC is compiled out: force every RGenGC tuning knob off,
 * overriding any value the build may have supplied. */
#ifdef RGENGC_DEBUG
#undef RGENGC_DEBUG
#endif
#define RGENGC_DEBUG 0
#ifdef RGENGC_CHECK_MODE
#undef RGENGC_CHECK_MODE
#endif
#define RGENGC_CHECK_MODE 0
#define RGENGC_PROFILE 0
#define RGENGC_ESTIMATE_OLDMALLOC 0
#define RGENGC_FORCE_MAJOR_GC 0

#endif /* USE_RGENGC */
2011-09-10 03:34:05 +04:00
/* Extra per-GC profiling data in gc_profile_record (off by default). */
#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#endif

/* Record getrusage()-style memory counters per GC (off by default). */
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#endif

/* Incremental marking follows the incremental-GC build switch. */
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#endif

/* Lazy (step-wise) sweeping is on by default. */
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#endif

/* MALLOC_ALLOCATED_SIZE may only be overridden when the platform can
 * actually report usable sizes (or exact sizes are being tracked). */
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#endif
#else
#define MALLOC_ALLOCATED_SIZE 0
#endif
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#endif

#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0
#endif

/* Object-info dumping is implied by either debug printing or check mode. */
#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#endif
2013-05-21 12:19:07 +04:00
/* Reason/option bit flags recorded for each GC run (see GC.latest_gc_info).
 * Major-GC reasons occupy the low byte; trigger reasons and sweep/mark
 * options use higher bits so they can be OR-ed together freely. */
typedef enum {
    GPR_FLAG_NONE               = 0x000,
    /* major reason */
    GPR_FLAG_MAJOR_BY_NOFREE    = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN    = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY     = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE     = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
#endif
    GPR_FLAG_MAJOR_MASK         = 0x0ff,
    /* gc reason */
    GPR_FLAG_NEWOBJ             = 0x100,
    GPR_FLAG_MALLOC             = 0x200,
    GPR_FLAG_METHOD             = 0x400,
    GPR_FLAG_CAPI               = 0x800,
    GPR_FLAG_STRESS            = 0x1000,
    /* others */
    GPR_FLAG_IMMEDIATE_SWEEP   = 0x2000,
    GPR_FLAG_HAVE_FINALIZE     = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK    = 0x8000,
    GPR_FLAG_FULL_MARK        = 0x10000
} gc_profile_record_flag;
2008-08-11 13:36:57 +04:00
/* One sample of GC::Profiler data: flags describing why/how the GC ran,
 * its timing, and heap occupancy. Optional fields are compiled in only
 * when the corresponding profiling macro is enabled. */
typedef struct gc_profile_record {
    int flags;                   /* gc_profile_record_flag bits */

    double gc_time;              /* total GC time (seconds) */
    double gc_invoke_time;       /* time of invocation, relative to process start */

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;

#if GC_PROFILE_MORE_DETAIL
    double gc_mark_time;
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    double prepare_time;
    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
    long maxrss;                 /* rusage counters for this GC */
    long minflt;
    long majflt;
#endif
#endif
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#endif

#if RGENGC_PROFILE > 0
    size_t old_objects;
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#endif
} gc_profile_record;
2015-10-18 05:10:34 +03:00
# if defined(_MSC_VER) || defined(__CYGWIN__)
2006-12-31 18:02:22 +03:00
# pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
# endif
typedef struct RVALUE {
union {
struct {
2007-06-25 15:20:45 +04:00
VALUE flags ; /* always 0 for freed obj */
2006-12-31 18:02:22 +03:00
struct RVALUE * next ;
} free ;
struct RBasic basic ;
struct RObject object ;
struct RClass klass ;
struct RFloat flonum ;
struct RString string ;
struct RArray array ;
struct RRegexp regexp ;
struct RHash hash ;
struct RData data ;
2009-06-17 01:36:50 +04:00
struct RTypedData typeddata ;
2006-12-31 18:02:22 +03:00
struct RStruct rstruct ;
struct RBignum bignum ;
struct RFile file ;
struct RMatch match ;
2008-03-16 03:23:43 +03:00
struct RRational rational ;
struct RComplex complex ;
2015-03-11 13:36:17 +03:00
union {
rb_cref_t cref ;
2015-03-11 15:27:34 +03:00
struct vm_svar svar ;
2015-03-11 15:49:27 +03:00
struct vm_throw_data throw_data ;
2015-03-11 16:31:11 +03:00
struct vm_ifunc ifunc ;
2015-03-12 02:13:01 +03:00
struct MEMO memo ;
2015-06-02 07:20:30 +03:00
struct rb_method_entry_struct ment ;
2015-07-22 01:52:59 +03:00
const rb_iseq_t iseq ;
2016-07-28 22:13:26 +03:00
rb_env_t env ;
2018-05-09 10:11:59 +03:00
struct rb_imemo_tmpbuf_struct alloc ;
2017-10-29 18:51:23 +03:00
rb_ast_t ast ;
2015-03-11 13:36:17 +03:00
} imemo ;
2013-05-15 14:26:22 +04:00
struct {
struct RBasic basic ;
VALUE v1 ;
VALUE v2 ;
VALUE v3 ;
} values ;
2006-12-31 18:02:22 +03:00
} as ;
2013-08-19 16:00:51 +04:00
# if GC_DEBUG
2009-06-15 13:06:16 +04:00
const char * file ;
2014-05-18 15:02:43 +04:00
int line ;
2006-12-31 18:02:22 +03:00
# endif
} RVALUE ;
2015-10-18 05:10:34 +03:00
# if defined(_MSC_VER) || defined(__CYGWIN__)
2006-12-31 18:02:22 +03:00
# pragma pack(pop)
# endif
2013-06-21 03:15:18 +04:00
/* Machine-word-sized unit used for the GC's bitmap operations. */
typedef uintptr_t bits_t ;
enum {
BITS_SIZE = sizeof ( bits_t ) , /* bytes per bitmap word */
BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT ) /* bits per bitmap word */
} ;
2013-10-18 10:33:36 +04:00
/* Header placed at the start of every aligned heap page body; it points
 * back to the heap_page bookkeeping record that owns the page. */
struct heap_page_header {
    struct heap_page *page;
};

/* In-memory layout of a heap page: the header, then alignment padding,
 * then the RVALUE slots (both tails are computed at run time, hence the
 * commented-out flexible members). */
struct heap_page_body {
    struct heap_page_header header;
    /* char gap[];      */
    /* RVALUE values[]; */
};
2006-12-31 18:02:22 +03:00
2008-04-14 07:47:04 +04:00
/* Singly linked list node holding the address of a VALUE slot registered
 * with the GC (presumably a global root to keep marked — verify at the
 * rb_gc_register_address use sites). */
struct gc_list {
VALUE * varptr ; /* address of the registered VALUE slot */
struct gc_list * next ; /* next registered node */
} ;
2012-10-03 16:30:21 +04:00
/* Number of VALUEs stored per mark-stack chunk. */
#define STACK_CHUNK_SIZE 500

/* One chunk of the chunked mark stack, linked to the next chunk. */
typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

/* Chunked stack of objects pending marking. Emptied chunks are kept on
 * a cache list so they can be reused without reallocating. */
typedef struct mark_stack {
    stack_chunk_t *chunk;        /* chunk currently being pushed/popped */
    stack_chunk_t *cache;        /* recycled empty chunks */
    int index;                   /* next free slot in *chunk */
    int limit;                   /* slot capacity of a chunk */
    size_t cache_size;           /* number of chunks on the cache list */
    size_t unused_cache_size;
} mark_stack_t;
2013-10-22 14:28:31 +04:00
typedef struct rb_heap_struct {
2014-09-09 09:24:42 +04:00
RVALUE * freelist ;
2013-10-19 03:33:55 +04:00
struct heap_page * free_pages ;
struct heap_page * using_page ;
2018-05-16 23:39:30 +03:00
struct list_head pages ;
struct heap_page * sweeping_page ; /* iterator for .pages */
2014-11-14 04:44:57 +03:00
# if GC_ENABLE_INCREMENTAL_MARK
2014-09-08 08:11:00 +04:00
struct heap_page * pooled_pages ;
2014-11-14 04:44:57 +03:00
# endif
2016-01-08 13:34:14 +03:00
size_t total_pages ; /* total page count in a heap */
2016-01-09 01:15:40 +03:00
size_t total_slots ; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
2013-10-22 14:28:31 +04:00
} rb_heap_t ;
2013-10-19 03:33:55 +04:00
2016-03-04 12:53:03 +03:00
/* Current phase of the collector (stored in rb_objspace flags.mode). */
enum gc_mode {
    gc_mode_none,       /* not inside a GC */
    gc_mode_marking,
    gc_mode_sweeping
};
2008-04-27 07:20:35 +04:00
typedef struct rb_objspace {
2008-04-14 07:47:04 +04:00
struct {
2008-05-12 10:28:43 +04:00
size_t limit ;
2018-05-18 11:40:16 +03:00
size_t increase ;
2013-12-05 08:54:20 +04:00
# if MALLOC_ALLOCATED_SIZE
2008-06-08 14:27:06 +04:00
size_t allocated_size ;
size_t allocations ;
# endif
} malloc_params ;
2013-10-19 03:33:55 +04:00
2014-09-09 06:45:21 +04:00
struct {
2016-03-04 13:37:35 +03:00
unsigned int mode : 2 ;
2014-09-09 06:45:21 +04:00
unsigned int immediate_sweep : 1 ;
unsigned int dont_gc : 1 ;
unsigned int dont_incremental : 1 ;
unsigned int during_gc : 1 ;
2014-09-09 17:09:14 +04:00
unsigned int gc_stressful : 1 ;
2015-10-29 09:17:07 +03:00
unsigned int has_hook : 1 ;
2014-09-09 06:45:21 +04:00
# if USE_RGENGC
unsigned int during_minor_gc : 1 ;
# endif
# if GC_ENABLE_INCREMENTAL_MARK
unsigned int during_incremental_marking : 1 ;
# endif
} flags ;
2014-09-08 08:11:00 +04:00
2014-09-09 10:00:57 +04:00
rb_event_flag_t hook_events ;
2014-09-09 14:01:18 +04:00
size_t total_allocated_objects ;
2014-09-09 10:00:57 +04:00
2014-09-09 09:24:42 +04:00
rb_heap_t eden_heap ;
rb_heap_t tomb_heap ; /* heap for zombies and ghosts */
2014-09-09 06:45:21 +04:00
struct {
rb_atomic_t finalizing ;
} atomic_flags ;
2014-09-09 10:00:57 +04:00
struct mark_func_data_struct {
void * data ;
void ( * mark_func ) ( VALUE v , void * data ) ;
} * mark_func_data ;
mark_stack_t mark_stack ;
2014-09-09 14:55:18 +04:00
size_t marked_slots ;
2014-09-08 08:11:00 +04:00
2013-10-22 14:28:31 +04:00
struct {
struct heap_page * * sorted ;
2014-09-09 13:33:52 +04:00
size_t allocated_pages ;
size_t allocatable_pages ;
size_t sorted_length ;
2013-10-22 14:28:31 +04:00
RVALUE * range [ 2 ] ;
2016-03-31 11:21:35 +03:00
size_t freeable_pages ;
2013-10-22 14:28:31 +04:00
/* final */
2013-11-24 23:18:53 +04:00
size_t final_slots ;
2014-06-04 17:33:20 +04:00
VALUE deferred_final ;
2013-10-22 14:28:31 +04:00
} heap_pages ;
2013-10-19 03:33:55 +04:00
2013-10-22 14:28:31 +04:00
st_table * finalizer_table ;
2014-09-09 10:00:57 +04:00
2008-08-11 13:36:57 +04:00
struct {
int run ;
2014-09-09 06:59:08 +04:00
int latest_gc_info ;
2013-06-22 01:51:41 +04:00
gc_profile_record * records ;
gc_profile_record * current_record ;
2013-05-13 20:34:25 +04:00
size_t next_index ;
2008-08-11 13:36:57 +04:00
size_t size ;
2013-05-21 12:19:07 +04:00
2013-06-16 00:18:11 +04:00
# if GC_PROFILE_MORE_DETAIL
double prepare_time ;
# endif
2008-08-11 13:36:57 +04:00
double invoke_time ;
2013-05-13 20:34:25 +04:00
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
size_t minor_gc_count ;
size_t major_gc_count ;
2013-11-04 22:59:33 +04:00
# if RGENGC_PROFILE > 0
2014-09-10 02:32:09 +04:00
size_t total_generated_normal_object_count ;
size_t total_generated_shady_object_count ;
size_t total_shade_operation_count ;
size_t total_promoted_count ;
size_t total_remembered_normal_object_count ;
size_t total_remembered_shady_object_count ;
2013-06-20 16:20:27 +04:00
2013-05-15 12:07:30 +04:00
# if RGENGC_PROFILE >= 2
2013-06-18 06:27:37 +04:00
size_t generated_normal_object_count_types [ RUBY_T_MASK ] ;
2013-05-15 12:07:30 +04:00
size_t generated_shady_object_count_types [ RUBY_T_MASK ] ;
2013-05-26 20:43:21 +04:00
size_t shade_operation_count_types [ RUBY_T_MASK ] ;
2014-09-10 02:32:09 +04:00
size_t promoted_types [ RUBY_T_MASK ] ;
2013-06-07 05:17:19 +04:00
size_t remembered_normal_object_count_types [ RUBY_T_MASK ] ;
2013-05-26 20:43:21 +04:00
size_t remembered_shady_object_count_types [ RUBY_T_MASK ] ;
2013-05-15 12:07:30 +04:00
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif /* RGENGC_PROFILE */
# endif /* USE_RGENGC */
2013-06-20 00:49:28 +04:00
/* temporary profiling space */
double gc_sweep_start_time ;
2014-09-09 14:01:18 +04:00
size_t total_allocated_objects_at_gc_start ;
2013-07-17 09:55:39 +04:00
size_t heap_used_at_gc_start ;
2013-11-01 16:49:49 +04:00
/* basic statistics */
size_t count ;
2014-09-09 14:01:18 +04:00
size_t total_freed_objects ;
2014-09-10 06:13:41 +04:00
size_t total_allocated_pages ;
size_t total_freed_pages ;
2008-08-11 13:36:57 +04:00
} profile ;
2008-04-14 07:47:04 +04:00
struct gc_list * global_list ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-09-09 08:56:55 +04:00
VALUE gc_stress_mode ;
2013-06-21 03:10:34 +04:00
# if USE_RGENGC
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
struct {
2014-07-25 09:12:06 +04:00
VALUE parent_object ;
2013-05-24 14:21:04 +04:00
int need_major_gc ;
2014-06-09 15:43:23 +04:00
size_t last_major_gc ;
2015-03-18 21:02:13 +03:00
size_t uncollectible_wb_unprotected_objects ;
size_t uncollectible_wb_unprotected_objects_limit ;
2014-09-10 06:35:17 +04:00
size_t old_objects ;
size_t old_objects_limit ;
2013-11-04 22:59:33 +04:00
2013-11-24 23:49:02 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
size_t oldmalloc_increase ;
size_t oldmalloc_increase_limit ;
2013-11-05 08:51:01 +04:00
# endif
2013-06-22 10:43:30 +04:00
# if RGENGC_CHECK_MODE >= 2
2013-11-21 08:57:37 +04:00
struct st_table * allrefs_table ;
size_t error_count ;
2013-06-22 10:43:30 +04:00
# endif
2013-05-24 14:21:04 +04:00
} rgengc ;
2014-11-13 23:31:29 +03:00
# if GC_ENABLE_INCREMENTAL_MARK
2014-11-13 23:16:59 +03:00
struct {
size_t pooled_slots ;
size_t step_slots ;
} rincgc ;
# endif
2013-06-21 03:10:34 +04:00
# endif /* USE_RGENGC */
2015-05-27 05:08:29 +03:00
# if GC_DEBUG_STRESS_TO_CLASS
VALUE stress_to_class ;
# endif
2008-04-14 07:47:04 +04:00
} rb_objspace_t ;
2013-06-21 03:15:18 +04:00
/* default tiny heap size: 16KB */
2016-01-09 01:15:40 +03:00
# define HEAP_PAGE_ALIGN_LOG 14
2013-06-21 03:15:18 +04:00
# define CEILDIV(i, mod) (((i) + (mod) - 1) / (mod))
enum {
2016-01-09 01:15:40 +03:00
HEAP_PAGE_ALIGN = ( 1UL < < HEAP_PAGE_ALIGN_LOG ) ,
HEAP_PAGE_ALIGN_MASK = ( ~ ( ~ 0UL < < HEAP_PAGE_ALIGN_LOG ) ) ,
2013-06-21 03:15:18 +04:00
REQUIRED_SIZE_BY_MALLOC = ( sizeof ( size_t ) * 5 ) ,
2016-01-09 01:15:40 +03:00
HEAP_PAGE_SIZE = ( HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC ) ,
HEAP_PAGE_OBJ_LIMIT = ( unsigned int ) ( ( HEAP_PAGE_SIZE - sizeof ( struct heap_page_header ) ) / sizeof ( struct RVALUE ) ) ,
HEAP_PAGE_BITMAP_LIMIT = CEILDIV ( CEILDIV ( HEAP_PAGE_SIZE , sizeof ( struct RVALUE ) ) , BITS_BITLENGTH ) ,
HEAP_PAGE_BITMAP_SIZE = ( BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT ) ,
HEAP_PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
2013-06-21 03:15:18 +04:00
} ;
2013-10-18 10:33:36 +04:00
struct heap_page {
2016-01-08 11:23:58 +03:00
short total_slots ;
short free_slots ;
short final_slots ;
2014-09-08 08:11:00 +04:00
struct {
unsigned int before_sweep : 1 ;
unsigned int has_remembered_objects : 1 ;
2015-03-18 21:02:13 +03:00
unsigned int has_uncollectible_shady_objects : 1 ;
2016-01-08 11:23:58 +03:00
unsigned int in_tomb : 1 ;
2014-09-08 08:11:00 +04:00
} flags ;
2014-11-12 21:57:06 +03:00
struct heap_page * free_next ;
RVALUE * start ;
RVALUE * freelist ;
2018-05-16 23:39:30 +03:00
struct list_node page_node ;
2014-11-12 21:57:06 +03:00
2014-09-08 08:11:00 +04:00
# if USE_RGENGC
2016-01-09 01:15:40 +03:00
bits_t wb_unprotected_bits [ HEAP_PAGE_BITMAP_LIMIT ] ;
2014-09-08 08:11:00 +04:00
# endif
2015-04-15 04:37:47 +03:00
/* the following three bitmaps are cleared at the beginning of full GC */
2016-01-09 01:15:40 +03:00
bits_t mark_bits [ HEAP_PAGE_BITMAP_LIMIT ] ;
2013-06-21 05:26:50 +04:00
# if USE_RGENGC
2016-01-09 01:15:40 +03:00
bits_t uncollectible_bits [ HEAP_PAGE_BITMAP_LIMIT ] ;
bits_t marking_bits [ HEAP_PAGE_BITMAP_LIMIT ] ;
2013-06-21 05:26:50 +04:00
# endif
} ;
2016-01-09 01:15:40 +03:00
# define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
2014-09-08 08:11:00 +04:00
# define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
# define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
2016-01-09 01:15:40 +03:00
# define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / sizeof(RVALUE))
2014-09-08 08:11:00 +04:00
# define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
# define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
# define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
2013-06-21 03:15:18 +04:00
/* Bitmap Operations */
2013-06-21 05:26:50 +04:00
# define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
# define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
# define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
2013-06-21 03:15:18 +04:00
2014-09-08 08:11:00 +04:00
/* getting bitmap */
# define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
# if USE_RGENGC
2015-03-18 21:02:13 +03:00
# define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
2014-09-08 08:11:00 +04:00
# define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
# define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
# endif
2017-11-19 10:07:42 +03:00
# ifndef ENABLE_VM_OBJSPACE
2017-11-20 04:17:43 +03:00
# define ENABLE_VM_OBJSPACE 1
2017-11-19 10:07:42 +03:00
# endif
2013-06-21 03:15:18 +04:00
/* Aliases */
2008-04-27 07:20:35 +04:00
# if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
2016-03-15 09:42:29 +03:00
# define rb_objspace (*rb_objspace_of(GET_VM()))
# define rb_objspace_of(vm) ((vm)->objspace)
2008-04-27 07:20:35 +04:00
# else
2013-11-21 12:20:34 +04:00
static rb_objspace_t rb_objspace = { { GC_MALLOC_LIMIT_MIN } } ;
2016-03-15 09:42:29 +03:00
# define rb_objspace_of(vm) (&rb_objspace)
2008-04-27 07:20:35 +04:00
# endif
2013-07-18 03:19:38 +04:00
2014-09-09 08:56:55 +04:00
# define ruby_initial_gc_stress gc_params.gc_stress
2015-07-22 01:52:59 +03:00
2014-09-09 08:56:55 +04:00
VALUE * ruby_initial_gc_stress_ptr = & ruby_initial_gc_stress ;
2008-06-08 14:27:06 +04:00
# define malloc_limit objspace->malloc_params.limit
2018-05-18 11:40:16 +03:00
# define malloc_increase objspace->malloc_params.increase
2013-06-16 00:18:11 +04:00
# define malloc_allocated_size objspace->malloc_params.allocated_size
2013-10-22 14:28:31 +04:00
# define heap_pages_sorted objspace->heap_pages.sorted
2014-09-09 13:33:52 +04:00
# define heap_allocated_pages objspace->heap_pages.allocated_pages
# define heap_pages_sorted_length objspace->heap_pages.sorted_length
2013-10-22 14:28:31 +04:00
# define heap_pages_lomem objspace->heap_pages.range[0]
# define heap_pages_himem objspace->heap_pages.range[1]
2014-09-09 13:33:52 +04:00
# define heap_allocatable_pages objspace->heap_pages.allocatable_pages
2016-03-31 11:21:35 +03:00
# define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
2013-11-24 23:18:53 +04:00
# define heap_pages_final_slots objspace->heap_pages.final_slots
2013-10-22 14:28:31 +04:00
# define heap_pages_deferred_final objspace->heap_pages.deferred_final
# define heap_eden (&objspace->eden_heap)
2013-10-23 12:48:54 +04:00
# define heap_tomb (&objspace->tomb_heap)
2008-04-14 07:47:04 +04:00
# define dont_gc objspace->flags.dont_gc
# define during_gc objspace->flags.during_gc
2014-09-09 06:45:21 +04:00
# define finalizing objspace->atomic_flags.finalizing
2013-10-22 14:28:31 +04:00
# define finalizer_table objspace->finalizer_table
2014-07-10 07:24:17 +04:00
# define global_list objspace->global_list
2014-09-09 17:09:14 +04:00
# define ruby_gc_stressful objspace->flags.gc_stressful
2014-09-09 08:56:55 +04:00
# define ruby_gc_stress_mode objspace->gc_stress_mode
2015-05-27 05:08:29 +03:00
# if GC_DEBUG_STRESS_TO_CLASS
# define stress_to_class objspace->stress_to_class
# else
# define stress_to_class 0
# endif
2008-04-14 07:47:04 +04:00
2016-03-04 12:53:03 +03:00
/*
 * Validate that `mode` is one of the known GC phases
 * (none / marking / sweeping) and return it unchanged, so it can wrap
 * reads and writes of objspace->flags.mode transparently.
 * Compiles to a pass-through unless RGENGC_CHECK_MODE is enabled;
 * with checks on, an unknown value aborts via rb_bug().
 * (Also restores the rb_bug message, which extraction had padded
 * with stray spaces inside the string literal.)
 */
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
#if RGENGC_CHECK_MODE > 0
    switch (mode) {
      case gc_mode_none:
      case gc_mode_marking:
      case gc_mode_sweeping:
	break;
      default:
	rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    }
#endif
    return mode;
}
2016-03-04 13:37:35 +03:00
# define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
# define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
2016-03-04 12:53:03 +03:00
# define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
# define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
2014-11-09 07:52:52 +03:00
# if USE_RGENGC
2014-09-09 06:45:21 +04:00
# define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
2014-11-09 07:52:52 +03:00
# else
# define is_full_marking(objspace) TRUE
# endif
# if GC_ENABLE_INCREMENTAL_MARK
# define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
# else
# define is_incremental_marking(objspace) FALSE
# endif
# if GC_ENABLE_INCREMENTAL_MARK
# define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
# else
# define will_be_incremental_marking(objspace) FALSE
# endif
2018-05-16 23:39:30 +03:00
# define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
2014-10-27 07:40:13 +03:00
# define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
2014-09-08 08:11:00 +04:00
2012-12-20 11:43:54 +04:00
# if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
2012-12-22 08:25:18 +04:00
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
2012-12-20 11:43:54 +04:00
# elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
2012-12-22 08:25:18 +04:00
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
( ( objid ) ^ FIXNUM_FLAG ) : ( NUM2PTR ( objid ) < < 1 ) )
2012-12-20 11:43:54 +04:00
# else
# error not supported
# endif
2011-09-29 15:09:48 +04:00
2012-08-05 14:39:37 +04:00
# define RANY(o) ((RVALUE*)(o))
2014-06-04 17:33:20 +04:00
/* A swept object that still has a pending free callback.
 * NOTE(review): `next` appears to chain zombies into a deferred-final
 * list and `dfree` looks like the callback invoked on `data` at
 * finalization time -- confirm against rb_objspace_call_finalizer. */
struct RZombie {
struct RBasic basic ;
VALUE next ;
void ( * dfree ) ( void * ) ;
void * data ;
} ;
# define RZOMBIE(o) ((struct RZombie *)(o))
2013-07-18 03:19:38 +04:00
# define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
2018-01-19 06:57:53 +03:00
# if RUBY_MARK_FREE_DEBUG
2012-08-05 14:39:37 +04:00
int ruby_gc_debug_indent = 0 ;
2018-01-19 06:57:53 +03:00
# endif
2012-08-05 14:39:37 +04:00
VALUE rb_mGC ;
2014-09-09 08:12:14 +04:00
int ruby_disable_gc = 0 ;
2012-01-07 18:02:23 +04:00
2015-07-22 01:52:59 +03:00
void rb_iseq_mark ( const rb_iseq_t * iseq ) ;
void rb_iseq_free ( const rb_iseq_t * iseq ) ;
2013-07-18 03:19:38 +04:00
void rb_gcdebug_print_obj_condition ( VALUE obj ) ;
2009-09-18 11:29:17 +04:00
static void rb_objspace_call_finalizer ( rb_objspace_t * objspace ) ;
2012-03-13 07:37:06 +04:00
static VALUE define_final0 ( VALUE obj , VALUE block ) ;
2012-08-05 14:39:37 +04:00
static void negative_size_allocation_error ( const char * ) ;
2012-10-03 16:30:21 +04:00
static void init_mark_stack ( mark_stack_t * stack ) ;
2013-07-18 03:19:38 +04:00
static int ready_to_gc ( rb_objspace_t * objspace ) ;
2014-09-08 08:11:00 +04:00
gc.c: reduce parameters for gc_start and garbage_collect
Every time I look at gc.c, I get confused by argument ordering:
gc_start(..., TRUE, TRUE, FALSE, ...)
gc_start(..., FALSE, FALSE, FALSE, ... )
While we do not have kwargs in C, we can use flags to improve readability:
gc_start(...,
GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | ...)
[ruby-core:87311] [Misc #14798]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63575 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-06-05 23:16:21 +03:00
static int garbage_collect ( rb_objspace_t * , int reason ) ;
2014-09-08 08:11:00 +04:00
gc.c: reduce parameters for gc_start and garbage_collect
Every time I look at gc.c, I get confused by argument ordering:
gc_start(..., TRUE, TRUE, FALSE, ...)
gc_start(..., FALSE, FALSE, FALSE, ... )
While we do not have kwargs in C, we can use flags to improve readability:
gc_start(...,
GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | ...)
[ruby-core:87311] [Misc #14798]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63575 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-06-05 23:16:21 +03:00
static int gc_start ( rb_objspace_t * objspace , int reason ) ;
2014-09-08 08:11:00 +04:00
static void gc_rest ( rb_objspace_t * objspace ) ;
static inline void gc_enter ( rb_objspace_t * objspace , const char * event ) ;
static inline void gc_exit ( rb_objspace_t * objspace , const char * event ) ;
static void gc_marks ( rb_objspace_t * objspace , int full_mark ) ;
static void gc_marks_start ( rb_objspace_t * objspace , int full ) ;
static int gc_marks_finish ( rb_objspace_t * objspace ) ;
static void gc_marks_rest ( rb_objspace_t * objspace ) ;
2014-11-14 04:44:57 +03:00
static void gc_marks_step ( rb_objspace_t * objspace , int slots ) ;
2014-09-08 08:11:00 +04:00
static void gc_marks_continue ( rb_objspace_t * objspace , rb_heap_t * heap ) ;
static void gc_sweep ( rb_objspace_t * objspace ) ;
static void gc_sweep_start ( rb_objspace_t * objspace ) ;
static void gc_sweep_finish ( rb_objspace_t * objspace ) ;
static int gc_sweep_step ( rb_objspace_t * objspace , rb_heap_t * heap ) ;
static void gc_sweep_rest ( rb_objspace_t * objspace ) ;
static void gc_sweep_continue ( rb_objspace_t * objspace , rb_heap_t * heap ) ;
2016-07-26 12:57:50 +03:00
static inline void gc_mark ( rb_objspace_t * objspace , VALUE ptr ) ;
2014-09-11 14:23:36 +04:00
static void gc_mark_ptr ( rb_objspace_t * objspace , VALUE ptr ) ;
2018-11-06 08:06:20 +03:00
NO_SANITIZE ( " memory " , static void gc_mark_maybe ( rb_objspace_t * objspace , VALUE ptr ) ) ;
2013-06-17 06:54:25 +04:00
static void gc_mark_children ( rb_objspace_t * objspace , VALUE ptr ) ;
2014-09-08 08:11:00 +04:00
static int gc_mark_stacked_objects_incremental ( rb_objspace_t * , size_t count ) ;
static int gc_mark_stacked_objects_all ( rb_objspace_t * ) ;
static void gc_grey ( rb_objspace_t * objspace , VALUE ptr ) ;
2015-03-18 22:54:14 +03:00
static inline int gc_mark_set ( rb_objspace_t * objspace , VALUE obj ) ;
2018-11-06 08:06:20 +03:00
NO_SANITIZE ( " memory " , static inline int is_pointer_to_heap ( rb_objspace_t * objspace , void * ptr ) ) ;
2014-09-08 08:11:00 +04:00
static void push_mark_stack ( mark_stack_t * , VALUE ) ;
static int pop_mark_stack ( mark_stack_t * , VALUE * ) ;
static size_t mark_stack_size ( mark_stack_t * stack ) ;
static void shrink_stack_chunk_cache ( mark_stack_t * stack ) ;
2014-08-17 14:51:33 +04:00
static size_t obj_memsize_of ( VALUE obj , int use_all_types ) ;
2014-06-03 11:50:23 +04:00
static VALUE gc_verify_internal_consistency ( VALUE self ) ;
2014-09-08 08:11:00 +04:00
static int gc_verify_heap_page ( rb_objspace_t * objspace , struct heap_page * page , VALUE obj ) ;
static int gc_verify_heap_pages ( rb_objspace_t * objspace ) ;
2013-11-05 08:51:01 +04:00
2014-09-09 08:56:55 +04:00
static void gc_stress_set ( rb_objspace_t * objspace , VALUE flag ) ;
2012-08-05 14:39:37 +04:00
static double getrusage_time ( void ) ;
2013-06-24 02:58:01 +04:00
static inline void gc_prof_setup_new_record ( rb_objspace_t * objspace , int reason ) ;
static inline void gc_prof_timer_start ( rb_objspace_t * ) ;
2013-05-21 12:21:59 +04:00
static inline void gc_prof_timer_stop ( rb_objspace_t * ) ;
2012-08-05 14:39:37 +04:00
static inline void gc_prof_mark_timer_start ( rb_objspace_t * ) ;
static inline void gc_prof_mark_timer_stop ( rb_objspace_t * ) ;
2013-06-20 00:43:33 +04:00
static inline void gc_prof_sweep_timer_start ( rb_objspace_t * ) ;
static inline void gc_prof_sweep_timer_stop ( rb_objspace_t * ) ;
2012-08-05 14:39:37 +04:00
static inline void gc_prof_set_malloc_info ( rb_objspace_t * ) ;
2013-06-22 02:29:09 +04:00
static inline void gc_prof_set_heap_info ( rb_objspace_t * ) ;
2013-06-22 01:51:41 +04:00
# define gc_prof_record(objspace) (objspace)->profile.current_record
2013-12-18 11:58:04 +04:00
# define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
2012-08-05 14:39:37 +04:00
2014-04-10 07:40:34 +04:00
# ifdef HAVE_VA_ARGS_MACRO
2017-04-24 07:20:02 +03:00
# define gc_report(level, objspace, ...) \
2017-07-12 08:30:43 +03:00
if ( ! RGENGC_DEBUG_ENABLED ( level ) ) { } else gc_report_body ( level , objspace , __VA_ARGS__ )
2014-04-10 07:40:34 +04:00
# else
2017-07-12 08:30:43 +03:00
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
2014-09-08 08:11:00 +04:00
# endif
PRINTF_ARGS ( static void gc_report_body ( int level , rb_objspace_t * objspace , const char * fmt , . . . ) , 3 , 4 ) ;
static const char * obj_info ( VALUE obj ) ;
# define PUSH_MARK_FUNC_DATA(v) do { \
struct mark_func_data_struct * prev_mark_func_data = objspace - > mark_func_data ; \
objspace - > mark_func_data = ( v ) ;
# define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
/*
* 1 - TSC ( H / W Time Stamp Counter )
* 2 - getrusage
*/
# ifndef TICK_TYPE
# define TICK_TYPE 1
2014-04-10 07:40:34 +04:00
# endif
2014-09-08 08:11:00 +04:00
# if USE_TICK_T
# if TICK_TYPE == 1
/* the following code is only for internal tuning. */
/* Source code to use RDTSC is quoted and modified from
* http : //www.mcs.anl.gov/~kazutomo/rdtsc.html
* written by Kazutomo Yoshii < kazutomo @ mcs . anl . gov >
*/
# if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t ;
# define PRItick "llu"
/*
 * i386 tick source: read the 64-bit time stamp counter with RDTSC.
 * The "=A" constraint captures EDX:EAX directly as one 64-bit value.
 * (Restores the asm template and constraint strings, which extraction
 * had padded with stray spaces.)
 */
static inline tick_t
tick(void)
{
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
    return x;
}
# elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t ;
# define PRItick "llu"
/*
 * x86-64 tick source: RDTSC returns the counter split across EDX:EAX;
 * combine the two 32-bit halves into a 64-bit value.
 * (Fixes the shift operator, which extraction had broken into "< <",
 * and the padded asm/constraint string literals.)
 */
static __inline__ tick_t
tick(void)
{
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
}
2016-05-12 21:12:47 +03:00
# elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
2016-03-17 11:14:53 +03:00
typedef unsigned long long tick_t ;
# define PRItick "llu"
/* PowerPC64 tick source: read the time base register via the
 * GCC built-in.  Behavior identical to the two-statement original. */
static __inline__ tick_t
tick(void)
{
    return __builtin_ppc_get_timebase();
}
2014-09-08 08:11:00 +04:00
# elif defined(_WIN32) && defined(_MSC_VER)
# include <intrin.h>
typedef unsigned __int64 tick_t ;
# define PRItick "llu"
/* Windows/MSVC tick source: time stamp counter via the __rdtsc intrinsic. */
static inline tick_t
tick(void)
{
    return __rdtsc();
}
# else /* use clock */
typedef clock_t tick_t ;
# define PRItick "llu"
/* Portable fallback tick source: processor time from clock(3). */
static inline tick_t
tick(void)
{
    return clock();
}
# endif /* TSC */
# elif TICK_TYPE == 2
typedef double tick_t ;
# define PRItick "4.9f"
/* TICK_TYPE == 2: wall-style tick source backed by getrusage_time(). */
static inline tick_t
tick(void)
{
    return getrusage_time();
}
# else /* TICK_TYPE */
# error "choose tick type"
# endif /* TICK_TYPE */
# define MEASURE_LINE(expr) do { \
volatile tick_t start_time = tick ( ) ; \
volatile tick_t end_time ; \
expr ; \
end_time = tick ( ) ; \
fprintf ( stderr , " 0 \t % " PRItick " \t %s \n " , end_time - start_time , # expr ) ; \
} while ( 0 )
# else /* USE_TICK_T */
# define MEASURE_LINE(expr) expr
# endif /* USE_TICK_T */
2017-06-21 15:51:26 +03:00
# define FL_CHECK2(name, x, pred) \
( ( RGENGC_CHECK_MODE & & SPECIAL_CONST_P ( x ) ) ? \
( rb_bug ( name " : SPECIAL_CONST (%p) " , ( void * ) ( x ) ) , 0 ) : ( pred ) )
# define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
# define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
# define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
2014-09-08 08:11:00 +04:00
# define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
# define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
# define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
2015-03-18 21:02:13 +03:00
# define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
2014-09-08 08:11:00 +04:00
# define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
2016-01-26 01:58:25 +03:00
# define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
2015-03-18 21:02:13 +03:00
# define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
2014-09-08 08:11:00 +04:00
# define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
# define RVALUE_OLD_AGE 3
# define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
static int rgengc_remembered ( rb_objspace_t * objspace , VALUE obj ) ;
2013-06-25 07:24:07 +04:00
static int rgengc_remember ( rb_objspace_t * objspace , VALUE obj ) ;
2013-10-22 14:28:31 +04:00
static void rgengc_mark_and_rememberset_clear ( rb_objspace_t * objspace , rb_heap_t * heap ) ;
static void rgengc_rememberset_mark ( rb_objspace_t * objspace , rb_heap_t * heap ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-09-08 08:11:00 +04:00
/*
 * Extract the object age (0..RVALUE_OLD_AGE) encoded in the
 * FL_PROMOTED0/FL_PROMOTED1 bits of an object's flags word.
 * (Fixes the shift operator, which extraction had broken into "> >",
 * and removes changelog/date lines spliced into the body.)
 */
static inline int
RVALUE_FLAGS_AGE(VALUE flags)
{
    return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
}
2013-11-04 22:59:33 +04:00
2014-09-08 08:11:00 +04:00
# endif /* USE_RGENGC */
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2015-03-18 21:46:10 +03:00
# if RGENGC_CHECK_MODE == 0
2013-06-21 23:36:48 +04:00
static inline VALUE
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( const VALUE obj )
2013-06-19 18:34:11 +04:00
{
2015-03-18 21:46:10 +03:00
return obj ;
}
# else
static VALUE
check_rvalue_consistency ( const VALUE obj )
{
2014-09-08 08:11:00 +04:00
rb_objspace_t * objspace = & rb_objspace ;
2015-03-18 21:46:10 +03:00
if ( SPECIAL_CONST_P ( obj ) ) {
2014-09-08 08:11:00 +04:00
rb_bug ( " check_rvalue_consistency: %p is a special const. " , ( void * ) obj ) ;
}
2015-03-18 21:46:10 +03:00
else if ( ! is_pointer_to_heap ( objspace , ( void * ) obj ) ) {
rb_bug ( " check_rvalue_consistency: %p is not a Ruby object. " , ( void * ) obj ) ;
}
2014-09-08 08:11:00 +04:00
else {
2015-03-18 21:46:10 +03:00
const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP ( obj ) ! = 0 ;
const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP ( obj ) ! = 0 ;
const int mark_bit = RVALUE_MARK_BITMAP ( obj ) ! = 0 ;
const int marking_bit = RVALUE_MARKING_BITMAP ( obj ) ! = 0 , remembered_bit = marking_bit ;
const int age = RVALUE_FLAGS_AGE ( RBASIC ( obj ) - > flags ) ;
2013-06-19 18:34:11 +04:00
2015-03-20 07:28:41 +03:00
if ( BUILTIN_TYPE ( obj ) = = T_NONE ) rb_bug ( " check_rvalue_consistency: %s is T_NONE " , obj_info ( obj ) ) ;
if ( BUILTIN_TYPE ( obj ) = = T_ZOMBIE ) rb_bug ( " check_rvalue_consistency: %s is T_ZOMBIE " , obj_info ( obj ) ) ;
2013-11-05 08:51:01 +04:00
obj_memsize_of ( ( VALUE ) obj , FALSE ) ;
2014-09-08 08:11:00 +04:00
/* check generation
*
* OLD = = age = = 3 & & old - bitmap & & mark - bit ( except incremental marking )
*/
if ( age > 0 & & wb_unprotected_bit ) {
rb_bug ( " check_rvalue_consistency: %s is not WB protected, but age is %d > 0. " , obj_info ( obj ) , age ) ;
2013-06-21 23:36:48 +04:00
}
2015-03-18 21:46:10 +03:00
if ( ! is_marking ( objspace ) & & uncollectible_bit & & ! mark_bit ) {
2015-03-18 21:02:13 +03:00
rb_bug ( " check_rvalue_consistency: %s is uncollectible, but is not marked while !gc. " , obj_info ( obj ) ) ;
2014-09-08 08:11:00 +04:00
}
2015-03-18 21:46:10 +03:00
if ( ! is_full_marking ( objspace ) ) {
if ( uncollectible_bit & & age ! = RVALUE_OLD_AGE & & ! wb_unprotected_bit ) {
rb_bug ( " check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected. " , obj_info ( obj ) , age ) ;
}
if ( remembered_bit & & age ! = RVALUE_OLD_AGE ) {
2018-01-18 14:44:10 +03:00
rb_bug ( " check_rvalue_consistency: %s is remembered, but not old (age: %d). " , obj_info ( obj ) , age ) ;
2015-03-18 21:46:10 +03:00
}
}
2014-09-08 08:11:00 +04:00
/*
* check coloring
*
* marking : false marking : true
* marked : false white * invalid *
* marked : true black grey
*/
2015-03-18 21:46:10 +03:00
if ( is_incremental_marking ( objspace ) & & marking_bit ) {
if ( ! is_marking ( objspace ) & & ! mark_bit ) rb_bug ( " check_rvalue_consistency: %s is marking, but not marked. " , obj_info ( obj ) ) ;
2013-11-04 22:59:33 +04:00
}
}
2013-06-21 23:36:48 +04:00
return obj ;
2013-06-19 18:34:11 +04:00
}
2015-03-18 21:46:10 +03:00
# endif
2013-06-19 18:34:11 +04:00
2014-06-03 11:44:19 +04:00
static inline int
2014-09-08 08:11:00 +04:00
RVALUE_MARKED ( VALUE obj )
2013-11-04 22:59:33 +04:00
{
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
return RVALUE_MARK_BITMAP ( obj ) ! = 0 ;
2013-11-04 22:59:33 +04:00
}
2014-09-08 08:11:00 +04:00
#if USE_RGENGC
/* True iff obj is not write-barrier protected (after a consistency check). */
static inline int
RVALUE_WB_UNPROTECTED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
}
2014-06-03 11:44:19 +04:00
static inline int
2014-09-08 08:11:00 +04:00
RVALUE_MARKING ( VALUE obj )
2013-11-04 22:59:33 +04:00
{
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
return RVALUE_MARKING_BITMAP ( obj ) ! = 0 ;
2013-11-04 22:59:33 +04:00
}
2014-06-03 11:44:19 +04:00
static inline int
2014-09-08 08:11:00 +04:00
RVALUE_REMEMBERED ( VALUE obj )
2013-11-04 22:59:33 +04:00
{
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
return RVALUE_MARKING_BITMAP ( obj ) ! = 0 ;
2013-11-04 22:59:33 +04:00
}
2014-09-08 08:11:00 +04:00
static inline int
2015-03-18 21:02:13 +03:00
RVALUE_UNCOLLECTIBLE ( VALUE obj )
2013-05-26 21:27:42 +04:00
{
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
2015-03-18 21:02:13 +03:00
return RVALUE_UNCOLLECTIBLE_BITMAP ( obj ) ! = 0 ;
2014-09-08 08:11:00 +04:00
}
2014-04-08 17:36:02 +04:00
2014-09-08 08:11:00 +04:00
/* True iff both promotion flag bits are set, i.e. the object is old.
 * "RAW": no consistency check is performed. */
static inline int
RVALUE_OLD_P_RAW(VALUE obj)
{
    const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
    return (RBASIC(obj)->flags & promoted) == promoted;
}
2014-04-08 17:36:02 +04:00
2014-09-08 08:11:00 +04:00
/* Checked variant of RVALUE_OLD_P_RAW(). */
static inline int
RVALUE_OLD_P(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
}
2014-04-08 17:36:02 +04:00
2014-09-08 08:11:00 +04:00
#if RGENGC_CHECK_MODE || GC_DEBUG
/* Extract the object's age field from its flags (debug builds only). */
static inline int
RVALUE_AGE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
}
#endif
2014-04-08 17:36:02 +04:00
2014-09-08 08:11:00 +04:00
static inline void
2015-03-18 21:02:13 +03:00
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET ( rb_objspace_t * objspace , struct heap_page * page , VALUE obj )
2014-09-08 08:11:00 +04:00
{
2015-03-18 21:02:13 +03:00
MARK_IN_BITMAP ( & page - > uncollectible_bits [ 0 ] , obj ) ;
2014-09-10 06:35:17 +04:00
objspace - > rgengc . old_objects + + ;
2018-10-31 00:53:56 +03:00
rb_transient_heap_promote ( obj ) ;
2013-11-04 22:59:33 +04:00
# if RGENGC_PROFILE >= 2
2014-09-10 02:32:09 +04:00
objspace - > profile . total_promoted_count + + ;
objspace - > profile . promoted_types [ BUILTIN_TYPE ( obj ) ] + + ;
2013-11-04 22:59:33 +04:00
# endif
}
2015-03-18 21:02:13 +03:00
/* Convenience wrapper: promote obj on its own heap page. */
static inline void
RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
}
2014-09-08 08:11:00 +04:00
/* Return flags with the age field replaced by `age`.
 * The age occupies the FL_PROMOTED0|FL_PROMOTED1 bits at RVALUE_AGE_SHIFT. */
static inline VALUE
RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
{
    flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
    flags |= (age << RVALUE_AGE_SHIFT);
    return flags;
}
2014-09-08 08:11:00 +04:00
/* set age to age+1 */
2013-11-04 22:59:33 +04:00
static inline void
2014-09-08 08:11:00 +04:00
RVALUE_AGE_INC ( rb_objspace_t * objspace , VALUE obj )
2013-11-05 11:48:31 +04:00
{
2014-09-08 08:11:00 +04:00
VALUE flags = RBASIC ( obj ) - > flags ;
int age = RVALUE_FLAGS_AGE ( flags ) ;
2014-04-08 17:36:02 +04:00
2014-09-08 08:11:00 +04:00
if ( RGENGC_CHECK_MODE & & age = = RVALUE_OLD_AGE ) {
rb_bug ( " RVALUE_AGE_INC: can not increment age of OLD object %s. " , obj_info ( obj ) ) ;
2014-06-03 07:55:04 +04:00
}
2014-09-08 08:11:00 +04:00
age + + ;
RBASIC ( obj ) - > flags = RVALUE_FLAGS_AGE_SET ( flags , age ) ;
2014-05-12 14:57:11 +04:00
2014-09-08 08:11:00 +04:00
if ( age = = RVALUE_OLD_AGE ) {
2015-03-18 21:02:13 +03:00
RVALUE_OLD_UNCOLLECTIBLE_SET ( objspace , obj ) ;
2014-09-08 08:11:00 +04:00
}
check_rvalue_consistency ( obj ) ;
}
/* Force obj's age directly to RVALUE_OLD_AGE and promote it.
 * Precondition: obj must not already be old. */
static inline void
RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);
}
2013-06-07 06:28:03 +04:00
2014-09-08 08:11:00 +04:00
/* set age to RVALUE_OLD_AGE - 1 */
2013-06-10 12:15:31 +04:00
static inline void
2014-09-08 08:11:00 +04:00
RVALUE_AGE_SET_CANDIDATE ( rb_objspace_t * objspace , VALUE obj )
2013-06-10 12:15:31 +04:00
{
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
2017-06-22 08:03:18 +03:00
GC_ASSERT ( ! RVALUE_OLD_P ( obj ) ) ;
2013-11-04 22:59:33 +04:00
2014-09-08 08:11:00 +04:00
RBASIC ( obj ) - > flags = RVALUE_FLAGS_AGE_SET ( RBASIC ( obj ) - > flags , RVALUE_OLD_AGE - 1 ) ;
check_rvalue_consistency ( obj ) ;
}
/* Reset obj's age to 0 and clear its uncollectible bit.
 * "RAW": no consistency checks, no counter updates. */
static inline void
RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-11-04 22:59:33 +04:00
static inline void
2014-09-08 08:11:00 +04:00
RVALUE_DEMOTE ( rb_objspace_t * objspace , VALUE obj )
2013-11-04 22:59:33 +04:00
{
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
2017-06-22 08:03:18 +03:00
GC_ASSERT ( RVALUE_OLD_P ( obj ) ) ;
2014-09-08 08:11:00 +04:00
2015-03-18 21:46:10 +03:00
if ( ! is_incremental_marking ( objspace ) & & RVALUE_REMEMBERED ( obj ) ) {
CLEAR_IN_BITMAP ( GET_HEAP_MARKING_BITS ( obj ) , obj ) ;
}
2014-09-08 08:11:00 +04:00
RVALUE_DEMOTE_RAW ( objspace , obj ) ;
if ( RVALUE_MARKED ( obj ) ) {
2014-09-10 06:35:17 +04:00
objspace - > rgengc . old_objects - - ;
2014-06-03 07:55:04 +04:00
}
2013-11-04 22:59:33 +04:00
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
}
2014-12-11 13:15:30 +03:00
/* Reset obj's age field to 0 without any checks. */
static inline void
RVALUE_AGE_RESET_RAW(VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
}
2014-09-08 08:11:00 +04:00
/* Checked age reset; only valid for objects that are not old. */
static inline void
RVALUE_AGE_RESET(VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
}
/* Tri-color: black = marked and not currently being marked. */
static inline int
RVALUE_BLACK_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
}
#if 0
/* Tri-color: grey = marked and still being marked (currently unused). */
static inline int
RVALUE_GREY_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
}
#endif
/* Tri-color: white = not marked at all. */
static inline int
RVALUE_WHITE_P(VALUE obj)
{
    return RVALUE_MARKED(obj) == FALSE;
}
#endif /* USE_RGENGC */
2012-08-05 14:39:37 +04:00
/*
- - - - - - - - - - - - - - - - - - - - - - - - - - - ObjectSpace - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*/
2009-09-18 11:29:17 +04:00
2008-04-14 07:47:04 +04:00
rb_objspace_t *
rb_objspace_alloc ( void )
{
2015-09-15 13:38:58 +03:00
# if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
2014-07-26 01:34:35 +04:00
rb_objspace_t * objspace = calloc ( 1 , sizeof ( rb_objspace_t ) ) ;
2015-09-15 13:38:58 +03:00
# else
rb_objspace_t * objspace = & rb_objspace ;
# endif
2013-11-21 12:50:40 +04:00
malloc_limit = gc_params . malloc_limit_min ;
2018-05-16 23:39:30 +03:00
list_head_init ( & objspace - > eden_heap . pages ) ;
list_head_init ( & objspace - > tomb_heap . pages ) ;
2013-11-05 08:51:01 +04:00
2008-04-14 07:47:04 +04:00
return objspace ;
}
2009-09-18 11:29:17 +04:00
2012-10-03 17:23:30 +04:00
static void free_stack_chunks ( mark_stack_t * ) ;
2013-10-23 12:48:54 +04:00
static void heap_page_free ( rb_objspace_t * objspace , struct heap_page * page ) ;
2010-12-03 06:53:21 +03:00
2009-09-18 11:29:17 +04:00
/* Tear down an object space: release profiling records, the global
 * variable list, every heap page, the sorted page index, and the mark
 * stack. The statically allocated objspace itself is never free()d. */
void
rb_objspace_free(rb_objspace_t *objspace)
{
    if (is_lazy_sweeping(heap_eden))
        rb_bug("lazy sweeping underway when freeing object space");

    if (objspace->profile.records) {
        free(objspace->profile.records);
        objspace->profile.records = 0;
    }

    if (global_list) {
        struct gc_list *list, *next;
        for (list = global_list; list; list = next) {
            next = list->next;
            xfree(list);
        }
    }
    if (heap_pages_sorted) {
        size_t i;
        for (i = 0; i < heap_allocated_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        }
        free(heap_pages_sorted);
        heap_allocated_pages = 0;
        heap_pages_sorted_length = 0;
        heap_pages_lomem = 0;
        heap_pages_himem = 0;

        objspace->eden_heap.total_pages = 0;
        objspace->eden_heap.total_slots = 0;
    }
    free_stack_chunks(&objspace->mark_stack);
#if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE)
    /* the single static objspace was not heap-allocated */
    if (objspace == &rb_objspace) return;
#endif
    free(objspace);
}
2008-04-14 07:47:04 +04:00
2017-06-22 03:59:54 +03:00
/* Grow the heap_pages_sorted array to hold next_length page pointers.
 * On realloc failure the old array stays valid; allocation failure of
 * either path raises rb_memerror(). */
static void
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
{
    struct heap_page **sorted;
    size_t size = next_length * sizeof(struct heap_page *);

    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);

    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
    }
    else {
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
    }

    if (sorted == 0) {
        rb_memerror();
    }

    heap_pages_sorted_length = next_length;
}
2009-01-12 06:41:20 +03:00
static void
2013-10-23 12:48:54 +04:00
heap_pages_expand_sorted ( rb_objspace_t * objspace )
2009-01-12 06:41:20 +03:00
{
2017-06-22 09:42:26 +03:00
/* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length
2018-01-15 15:00:06 +03:00
* because heap_allocatable_pages contains heap_tomb - > total_pages ( recycle heap_tomb pages ) .
2018-04-14 19:50:06 +03:00
* however , if there are pages which do not have empty slots , then try to create new pages
2017-06-22 09:42:26 +03:00
* so that the additional allocatable_pages counts ( heap_tomb - > total_pages ) are added .
*/
2014-09-09 13:33:52 +04:00
size_t next_length = heap_allocatable_pages ;
2016-01-08 13:34:14 +03:00
next_length + = heap_eden - > total_pages ;
next_length + = heap_tomb - > total_pages ;
2013-10-23 12:48:54 +04:00
2014-09-09 13:33:52 +04:00
if ( next_length > heap_pages_sorted_length ) {
2017-06-22 03:59:54 +03:00
heap_pages_expand_sorted_to ( objspace , next_length ) ;
1999-12-14 09:50:43 +03:00
}
2017-06-22 09:42:26 +03:00
GC_ASSERT ( heap_allocatable_pages + heap_eden - > total_pages < = heap_pages_sorted_length ) ;
GC_ASSERT ( heap_allocated_pages < = heap_pages_sorted_length ) ;
}
/* Set the allocatable-page budget and make sure the sorted index can
 * accommodate it. */
static void
heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
{
    heap_allocatable_pages = s;
    heap_pages_expand_sorted(objspace);
}
1999-08-13 09:45:20 +04:00
2017-06-22 09:42:26 +03:00
2013-07-16 12:32:32 +04:00
static inline void
2013-10-18 10:33:36 +04:00
heap_page_add_freeobj ( rb_objspace_t * objspace , struct heap_page * page , VALUE obj )
2013-07-16 12:32:32 +04:00
{
RVALUE * p = ( RVALUE * ) obj ;
2019-04-02 22:13:07 +03:00
unpoison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) , false ) ;
2013-07-16 12:32:32 +04:00
p - > as . free . flags = 0 ;
2013-10-18 10:33:36 +04:00
p - > as . free . next = page - > freelist ;
page - > freelist = p ;
2019-04-02 22:13:07 +03:00
__asan_poison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) ) ;
2014-09-08 08:11:00 +04:00
if ( RGENGC_CHECK_MODE & & ! is_pointer_to_heap ( objspace , p ) ) {
2018-01-02 09:41:40 +03:00
rb_bug ( " heap_page_add_freeobj: %p is not rvalue. " , ( void * ) p ) ;
2014-09-08 08:11:00 +04:00
}
2018-11-06 13:06:07 +03:00
poison_object ( obj ) ;
2014-09-08 08:11:00 +04:00
gc_report ( 3 , objspace , " heap_page_add_freeobj: add %p to freelist \n " , ( void * ) obj ) ;
2013-07-16 12:32:32 +04:00
}
static inline void
2013-10-22 14:28:31 +04:00
heap_add_freepage ( rb_objspace_t * objspace , rb_heap_t * heap , struct heap_page * page )
2013-07-16 12:32:32 +04:00
{
2019-04-02 22:13:07 +03:00
unpoison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) , false ) ;
2013-10-18 10:33:36 +04:00
if ( page - > freelist ) {
2013-10-22 14:28:31 +04:00
page - > free_next = heap - > free_pages ;
heap - > free_pages = page ;
2013-07-16 12:32:32 +04:00
}
2019-04-02 22:13:07 +03:00
poison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) ) ;
2013-07-16 12:32:32 +04:00
}
2014-11-14 04:44:57 +03:00
#if GC_ENABLE_INCREMENTAL_MARK
/* Link `page` into the heap's pooled-page list for incremental marking.
 * Returns TRUE if the page had free slots and was pooled, else FALSE. */
static inline int
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
    if (page->freelist) {
        page->free_next = heap->pooled_pages;
        heap->pooled_pages = page;
        objspace->rincgc.pooled_slots += page->free_slots;
        poison_memory_region(&page->freelist, sizeof(RVALUE *));
        return TRUE;
    }
    else {
        poison_memory_region(&page->freelist, sizeof(RVALUE *));
        return FALSE;
    }
}
#endif
2014-09-08 08:11:00 +04:00
2013-10-23 12:48:54 +04:00
/* Remove `page` from `heap`'s page list and update the heap totals. */
static void
heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;
}
/* Release a heap page: the aligned page body and the page struct. */
static void
heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
    heap_allocated_pages--;
    objspace->profile.total_freed_pages++;
    rb_aligned_free(GET_PAGE_BODY(page->start));
    free(page);
}
/* Free fully-empty tomb pages and compact heap_pages_sorted in place.
 * i walks all entries, j is the write cursor for surviving pages. */
static void
heap_pages_free_unused_pages(rb_objspace_t *objspace)
{
    size_t i, j;

    if (!list_empty(&heap_tomb->pages)) {
        for (i = j = 1; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];

            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, heap_tomb, page);
                heap_page_free(objspace, page);
            }
            else {
                if (i != j) {
                    heap_pages_sorted[j] = page;
                }
                j++;
            }
        }
        GC_ASSERT(j == heap_allocated_pages);
    }
}
2013-10-22 14:28:31 +04:00
/* Allocate a brand-new heap page: an aligned page body plus its
 * heap_page bookkeeping struct, insert it into the address-sorted
 * page index, and seed its freelist with every slot. */
static struct heap_page *
heap_page_allocate(rb_objspace_t *objspace)
{
    RVALUE *start, *end, *p;
    struct heap_page *page;
    struct heap_page_body *page_body = 0;
    size_t hi, lo, mid;
    int limit = HEAP_PAGE_OBJ_LIMIT;

    /* assign heap_page body (contains heap_page_header and RVALUEs) */
    page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
    if (page_body == 0) {
        rb_memerror();
    }

    /* assign heap_page entry */
    page = (struct heap_page *)calloc(1, sizeof(struct heap_page));
    if (page == 0) {
        rb_aligned_free(page_body);
        rb_memerror();
    }

    /* adjust obj_limit (object number available in this page) so the
     * first slot is RVALUE-aligned */
    start = (RVALUE *)((VALUE)page_body + sizeof(struct heap_page_header));
    if ((VALUE)start % sizeof(RVALUE) != 0) {
        int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
        start = (RVALUE *)((VALUE)start + delta);
        limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body)) / (int)sizeof(RVALUE);
    }
    end = start + limit;

    /* binary-search the insertion slot in the address-sorted index */
    lo = 0;
    hi = heap_allocated_pages;
    while (lo < hi) {
        struct heap_page *mid_page;

        mid = (lo + hi) / 2;
        mid_page = heap_pages_sorted[mid];
        if (mid_page->start < start) {
            lo = mid + 1;
        }
        else if (mid_page->start > start) {
            hi = mid;
        }
        else {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
        }
    }

    if (hi < heap_allocated_pages) {
        MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header *, heap_allocated_pages - hi);
    }

    heap_pages_sorted[hi] = page;

    heap_allocated_pages++;

    GC_ASSERT(heap_eden->total_pages + heap_allocatable_pages <= heap_pages_sorted_length);
    GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

    objspace->profile.total_allocated_pages++;

    if (heap_allocated_pages > heap_pages_sorted_length) {
        rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
               heap_allocated_pages, heap_pages_sorted_length);
    }

    /* track the overall heap address range for is_pointer_to_heap */
    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
    if (heap_pages_himem < end) heap_pages_himem = end;

    page->start = start;
    page->total_slots = limit;
    page_body->header.page = page;

    for (p = start; p != end; p++) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    }
    page->free_slots = limit;

    poison_memory_region(&page->freelist, sizeof(RVALUE *));
    return page;
}
2009-09-17 13:34:20 +04:00
2013-10-23 12:48:54 +04:00
/* Try to recycle a tomb page that still has free slots; unlink and
 * return it, or NULL when no tomb page qualifies. */
static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace)
{
    struct heap_page *page = 0, *next;

    list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
        unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, heap_tomb, page);
            poison_memory_region(&page->freelist, sizeof(RVALUE *));
            return page;
        }
    }

    return NULL;
}
/* Obtain one heap page, preferring to recycle a tomb page and falling
 * back to a fresh allocation. Consumes one unit of the allocatable-page
 * budget. */
static struct heap_page *
heap_page_create(rb_objspace_t *objspace)
{
    struct heap_page *page;
    const char *method = "recycle";

    heap_allocatable_pages--;

    page = heap_page_resurrect(objspace);

    if (page == NULL) {
        page = heap_page_allocate(objspace);
        method = "allocate";
    }
    /* debug label fixed: the first value printed is heap_pages_sorted_length,
     * not heap_allocated_pages (the old string repeated the latter twice) */
    if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_sorted_length: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
                   method, (void *)page, (int)heap_pages_sorted_length, (int)heap_allocated_pages, (int)heap_tomb->total_pages);
    return page;
}
/* Attach `page` to `heap` (eden or tomb) and update its totals. */
static void
heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    page->flags.in_tomb = (heap == heap_tomb);
    list_add(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
}
2008-06-08 14:27:06 +04:00
2013-10-23 12:48:54 +04:00
/* Create one page and register it with `heap` as both a member page
 * and a source of free slots. */
static void
heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = heap_page_create(objspace);
    heap_add_page(objspace, heap, page);
    heap_add_freepage(objspace, heap, page);
}
/* Grow `heap` by exactly `add` pages, consuming the whole budget. */
static void
heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
{
    size_t i;

    heap_allocatable_pages_set(objspace, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, heap);
    }

    GC_ASSERT(heap_allocatable_pages == 0);
}
2014-05-15 13:54:49 +04:00
static size_t
2016-03-31 11:21:35 +03:00
heap_extend_pages ( rb_objspace_t * objspace , size_t free_slots , size_t total_slots )
1998-01-16 15:13:05 +03:00
{
2016-03-31 12:16:48 +03:00
double goal_ratio = gc_params . heap_free_slots_goal_ratio ;
size_t used = heap_allocated_pages + heap_allocatable_pages ;
size_t next_used ;
if ( goal_ratio = = 0.0 ) {
next_used = ( size_t ) ( used * gc_params . growth_factor ) ;
}
else {
/* Find `f' where free_slots = f * total_slots * goal_ratio
* = > f = ( total_slots - free_slots ) / ( ( 1 - goal_ratio ) * total_slots )
*/
double f = ( double ) ( total_slots - free_slots ) / ( ( 1 - goal_ratio ) * total_slots ) ;
if ( f > gc_params . growth_factor ) f = gc_params . growth_factor ;
if ( f < 1.0 ) f = 1.1 ;
next_used = ( size_t ) ( f * used ) ;
2016-03-31 15:46:15 +03:00
if ( 0 ) {
fprintf ( stderr ,
2016-09-13 15:33:13 +03:00
" free_slots(%8 " PRIuSIZE " )/total_slots(%8 " PRIuSIZE " )=%1.2f, "
2016-03-31 15:46:15 +03:00
" G(%1.2f), f(%1.2f), "
2016-09-13 15:33:13 +03:00
" used(%8 " PRIuSIZE " ) => next_used(%8 " PRIuSIZE " ) \n " ,
2016-03-31 15:46:15 +03:00
free_slots , total_slots , free_slots / ( double ) total_slots ,
goal_ratio , f , used , next_used ) ;
}
2016-03-31 12:16:48 +03:00
}
2014-05-15 13:54:49 +04:00
2013-11-23 07:33:10 +04:00
if ( gc_params . growth_max_slots > 0 ) {
2016-03-31 12:16:48 +03:00
size_t max_used = ( size_t ) ( used + gc_params . growth_max_slots / HEAP_PAGE_OBJ_LIMIT ) ;
if ( next_used > max_used ) next_used = max_used ;
2013-10-24 02:47:29 +04:00
}
2013-11-22 11:40:27 +04:00
2016-03-31 12:16:48 +03:00
return next_used - used ;
2014-05-15 14:53:40 +04:00
}
2013-11-22 11:40:27 +04:00
2014-05-15 13:54:49 +04:00
/* Budget `additional_pages` more pages for the eden heap, nudging the
 * target up by one if it would equal the current allocation. */
static void
heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
{
    size_t used = heap_eden->total_pages;
    size_t next_used_limit = used + additional_pages;

    if (next_used_limit == heap_allocated_pages) next_used_limit++;

    heap_allocatable_pages_set(objspace, next_used_limit - used);

    gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %d\n", (int)heap_allocatable_pages);
}
2012-08-05 14:39:37 +04:00
static int
2013-10-23 14:16:01 +04:00
heap_increment ( rb_objspace_t * objspace , rb_heap_t * heap )
2008-06-08 14:27:06 +04:00
{
2014-09-09 13:33:52 +04:00
if ( heap_allocatable_pages > 0 ) {
2016-01-08 13:34:14 +03:00
gc_report ( 1 , objspace , " heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d \n " ,
( int ) heap_pages_sorted_length , ( int ) heap_allocatable_pages , ( int ) heap - > total_pages ) ;
2017-06-22 09:42:26 +03:00
GC_ASSERT ( heap_allocatable_pages + heap_eden - > total_pages < = heap_pages_sorted_length ) ;
GC_ASSERT ( heap_allocated_pages < = heap_pages_sorted_length ) ;
2013-10-22 14:28:31 +04:00
heap_assign_page ( objspace , heap ) ;
2012-08-05 14:39:37 +04:00
return TRUE ;
2012-01-11 18:09:10 +04:00
}
2012-08-05 14:39:37 +04:00
return FALSE ;
}
2008-06-08 14:27:06 +04:00
2014-09-08 08:11:00 +04:00
/* Make free pages available on `heap`: continue lazy sweep or
 * incremental marking if one is in progress, then extend the heap or
 * start a GC; raise rb_memerror() if nothing yields a free page. */
static void
heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
{
    GC_ASSERT(heap->free_pages == NULL);

    if (is_lazy_sweeping(heap)) {
        gc_sweep_continue(objspace, heap);
    }
    else if (is_incremental_marking(objspace)) {
        gc_marks_continue(objspace, heap);
    }

    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
        gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
        rb_memerror();
    }
}
2013-12-19 09:04:55 +04:00
/* Take the next free page (preparing the heap until one exists), make
 * it the using_page, and return its whole freelist chain; the page's
 * freelist and free_slots are cleared. */
static RVALUE *
heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page;
    RVALUE *p;

    while (heap->free_pages == NULL) {
        heap_prepare(objspace, heap);
    }
    page = heap->free_pages;
    heap->free_pages = page->free_next;
    heap->using_page = page;

    GC_ASSERT(page->free_slots != 0);
    unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
    p = page->freelist;
    page->freelist = NULL;
    poison_memory_region(&page->freelist, sizeof(RVALUE *));
    page->free_slots = 0;
    unpoison_object((VALUE)p, true);
    return p;
}
2015-10-07 14:18:17 +03:00
/*
 * Fast-path allocation: pop one object from the heap-local freelist.
 * Returns 0 (Qfalse) when the freelist is empty; the caller must then
 * fall back to the slow path.  Never triggers GC.
 */
static inline VALUE
heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
{
    RVALUE *p = heap->freelist;
    if (LIKELY(p != NULL)) {
	heap->freelist = p->as.free.next;
    }
    unpoison_object((VALUE)p, true);
    return (VALUE)p;
}
2013-07-16 14:29:31 +04:00
static inline VALUE
2013-10-22 14:28:31 +04:00
heap_get_freeobj ( rb_objspace_t * objspace , rb_heap_t * heap )
2013-07-16 14:29:31 +04:00
{
2013-10-22 14:28:31 +04:00
RVALUE * p = heap - > freelist ;
2013-07-16 14:29:31 +04:00
2013-12-19 09:04:55 +04:00
while ( 1 ) {
2014-09-09 09:12:15 +04:00
if ( LIKELY ( p ! = NULL ) ) {
2018-11-06 13:06:07 +03:00
unpoison_object ( ( VALUE ) p , true ) ;
2013-12-19 09:04:55 +04:00
heap - > freelist = p - > as . free . next ;
return ( VALUE ) p ;
}
else {
p = heap_get_freeobj_from_next_freepage ( objspace , heap ) ;
}
2013-07-16 14:29:31 +04:00
}
}
2013-05-27 04:21:02 +04:00
void
rb_objspace_set_event_hook ( const rb_event_flag_t event )
{
rb_objspace_t * objspace = & rb_objspace ;
objspace - > hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK ;
2015-10-29 09:17:07 +03:00
objspace - > flags . has_hook = ( objspace - > hook_events ! = 0 ) ;
2013-05-27 04:21:02 +04:00
}
static void
2017-10-29 16:19:14 +03:00
gc_event_hook_body ( rb_execution_context_t * ec , rb_objspace_t * objspace , const rb_event_flag_t event , VALUE data )
2013-05-27 04:21:02 +04:00
{
2018-09-14 10:44:44 +03:00
/* increment PC because source line is calculated with PC-1 */
const VALUE * pc = ec - > cfp - > pc + + ;
2017-10-29 16:19:14 +03:00
EXEC_EVENT_HOOK ( ec , event , ec - > cfp - > self , 0 , 0 , 0 , data ) ;
2018-09-14 10:44:44 +03:00
ec - > cfp - > pc = pc ;
2013-05-27 04:21:02 +04:00
}
2015-10-29 09:17:07 +03:00
/* Cheap predicates over the cached hook state (see
 * rb_objspace_set_event_hook), and the hook-dispatch macro used on the
 * allocation/free hot paths.  gc_event_hook() only pays for GET_EC()
 * when the event is actually enabled. */
#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
	gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
    } \
} while (0)
2015-10-07 14:18:17 +03:00
static inline VALUE
2015-10-29 11:00:01 +03:00
newobj_init ( VALUE klass , VALUE flags , VALUE v1 , VALUE v2 , VALUE v3 , int wb_protected , rb_objspace_t * objspace , VALUE obj )
2008-04-27 07:20:35 +04:00
{
2017-06-22 08:03:18 +03:00
GC_ASSERT ( BUILTIN_TYPE ( obj ) = = T_NONE ) ;
GC_ASSERT ( ( flags & FL_WB_PROTECTED ) = = 0 ) ;
2014-09-08 08:11:00 +04:00
2013-05-15 14:26:22 +04:00
/* OBJSETUP */
2015-10-29 10:26:44 +03:00
RBASIC ( obj ) - > flags = flags ;
2013-12-19 05:51:16 +04:00
RBASIC_SET_CLASS_RAW ( obj , klass ) ;
2013-05-15 14:26:22 +04:00
RANY ( obj ) - > as . values . v1 = v1 ;
RANY ( obj ) - > as . values . v2 = v2 ;
RANY ( obj ) - > as . values . v3 = v3 ;
2014-09-08 08:11:00 +04:00
# if RGENGC_CHECK_MODE
2017-06-22 08:03:18 +03:00
GC_ASSERT ( RVALUE_MARKED ( obj ) = = FALSE ) ;
GC_ASSERT ( RVALUE_MARKING ( obj ) = = FALSE ) ;
GC_ASSERT ( RVALUE_OLD_P ( obj ) = = FALSE ) ;
GC_ASSERT ( RVALUE_WB_UNPROTECTED ( obj ) = = FALSE ) ;
2014-12-11 13:15:30 +03:00
if ( flags & FL_PROMOTED1 ) {
if ( RVALUE_AGE ( obj ) ! = 2 ) rb_bug ( " newobj: %s of age (%d) != 2. " , obj_info ( obj ) , RVALUE_AGE ( obj ) ) ;
}
else {
if ( RVALUE_AGE ( obj ) > 0 ) rb_bug ( " newobj: %s of age (%d) > 0. " , obj_info ( obj ) , RVALUE_AGE ( obj ) ) ;
}
2014-09-10 02:32:09 +04:00
if ( rgengc_remembered ( objspace , ( VALUE ) obj ) ) rb_bug ( " newobj: %s is remembered. " , obj_info ( obj ) ) ;
2014-09-08 08:11:00 +04:00
# endif
# if USE_RGENGC
2015-10-29 10:26:44 +03:00
if ( UNLIKELY ( wb_protected = = FALSE ) ) {
2014-09-08 08:11:00 +04:00
MARK_IN_BITMAP ( GET_HEAP_WB_UNPROTECTED_BITS ( obj ) , obj ) ;
}
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if RGENGC_PROFILE
2015-10-29 10:26:44 +03:00
if ( wb_protected ) {
2014-09-10 02:32:09 +04:00
objspace - > profile . total_generated_normal_object_count + + ;
2013-06-18 06:27:37 +04:00
# if RGENGC_PROFILE >= 2
objspace - > profile . generated_normal_object_count_types [ BUILTIN_TYPE ( obj ) ] + + ;
# endif
}
2013-05-15 12:07:30 +04:00
else {
2014-09-10 02:32:09 +04:00
objspace - > profile . total_generated_shady_object_count + + ;
2013-05-15 12:07:30 +04:00
# if RGENGC_PROFILE >= 2
objspace - > profile . generated_shady_object_count_types [ BUILTIN_TYPE ( obj ) ] + + ;
# endif
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif
2014-09-10 02:32:09 +04:00
# if GC_DEBUG
2017-11-16 08:52:19 +03:00
RANY ( obj ) - > file = rb_source_location_cstr ( & RANY ( obj ) - > line ) ;
2017-06-22 08:03:18 +03:00
GC_ASSERT ( ! SPECIAL_CONST_P ( obj ) ) ; /* check alignment */
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif
2014-09-09 14:01:18 +04:00
objspace - > total_allocated_objects + + ;
2015-10-07 14:18:17 +03:00
2014-09-10 02:32:09 +04:00
gc_report ( 5 , objspace , " newobj: %s \n " , obj_info ( obj ) ) ;
2015-03-18 22:54:14 +03:00
# if RGENGC_OLD_NEWOBJ_CHECK > 0
{
static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK ;
2015-03-19 10:14:12 +03:00
if ( ! is_incremental_marking ( objspace ) & &
flags & FL_WB_PROTECTED & & /* do not promote WB unprotected objects */
2015-03-18 22:54:14 +03:00
! RB_TYPE_P ( obj , T_ARRAY ) ) { /* array.c assumes that allocated objects are new */
if ( - - newobj_cnt = = 0 ) {
newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK ;
gc_mark_set ( objspace , obj ) ;
RVALUE_AGE_SET_OLD ( objspace , obj ) ;
2015-03-19 10:14:12 +03:00
rb_gc_writebarrier_remember ( obj ) ;
2015-03-18 22:54:14 +03:00
}
}
}
# endif
check_rvalue_consistency ( obj ) ;
2012-08-05 14:39:37 +04:00
return obj ;
2011-05-22 13:26:46 +04:00
}
2008-06-08 14:27:06 +04:00
2015-10-30 12:33:08 +03:00
/*
 * Slow-path allocation: handles the during_gc / GC-stress cases, then
 * obtains a slot (possibly running GC) and initializes it.  Also fires
 * the NEWOBJ event hook, which the fast path deliberately skips.
 *
 * Allocating while the GC itself is running is a bug; we disable GC and
 * abort loudly rather than corrupt the heap.
 */
static inline VALUE
newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
{
    VALUE obj;

    if (UNLIKELY(during_gc || ruby_gc_stressful)) {
	if (during_gc) {
	    dont_gc = 1;
	    during_gc = 0;
	    rb_bug("object allocation during garbage collection phase");
	}

	if (ruby_gc_stressful) {
	    /* GC.stress: force a collection before every allocation. */
	    if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
		rb_memerror();
	    }
	}
    }

    obj = heap_get_freeobj(objspace, heap_eden);
    newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
    return obj;
}
2015-10-30 12:33:08 +03:00
/* Out-of-line wrappers that pin wb_protected to a compile-time constant
 * so newobj_slowpath() can be specialized, while keeping the slow path
 * out of the inlined fast path (NOINLINE). */
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));

static VALUE
newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
{
    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE);
}

static VALUE
newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
{
    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE);
}
2015-10-10 09:34:24 +03:00
static inline VALUE
2015-10-29 10:26:44 +03:00
newobj_of ( VALUE klass , VALUE flags , VALUE v1 , VALUE v2 , VALUE v3 , int wb_protected )
2015-10-07 14:18:17 +03:00
{
rb_objspace_t * objspace = & rb_objspace ;
VALUE obj ;
2018-09-25 21:13:29 +03:00
RB_DEBUG_COUNTER_INC ( obj_newobj ) ;
( void ) RB_DEBUG_COUNTER_INC_IF ( obj_newobj_wb_unprotected , ! wb_protected ) ;
2015-10-07 14:18:17 +03:00
# if GC_DEBUG_STRESS_TO_CLASS
if ( UNLIKELY ( stress_to_class ) ) {
2018-10-31 00:02:12 +03:00
long i , cnt = RARRAY_LEN ( stress_to_class ) ;
for ( i = 0 ; i < cnt ; + + i ) {
2018-10-30 06:22:09 +03:00
if ( klass = = RARRAY_AREF ( stress_to_class , i ) ) rb_memerror ( ) ;
2018-10-31 00:02:12 +03:00
}
2015-10-07 14:18:17 +03:00
}
# endif
2015-10-30 12:33:08 +03:00
if ( ! ( during_gc | |
ruby_gc_stressful | |
gc_event_hook_available_p ( objspace ) ) & &
( obj = heap_get_freeobj_head ( objspace , heap_eden ) ) ! = Qfalse ) {
2015-10-29 11:00:01 +03:00
return newobj_init ( klass , flags , v1 , v2 , v3 , wb_protected , objspace , obj ) ;
2015-10-07 14:18:17 +03:00
}
else {
2018-09-25 21:13:29 +03:00
RB_DEBUG_COUNTER_INC ( obj_newobj_slowpath ) ;
2015-10-30 12:33:08 +03:00
return wb_protected ?
newobj_slowpath_wb_protected ( klass , flags , v1 , v2 , v3 , objspace ) :
newobj_slowpath_wb_unprotected ( klass , flags , v1 , v2 , v3 , objspace ) ;
2015-10-07 14:18:17 +03:00
}
}
2015-10-29 10:26:44 +03:00
/* Public allocators distinguished by write-barrier discipline.  The
 * caller must not pass FL_WB_PROTECTED in `flags`; protection is
 * conveyed by which function is called. */
VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(klass, flags, 0, 0, 0, FALSE);
}

VALUE
rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(klass, flags, 0, 0, 0, TRUE);
}
2015-10-30 12:33:08 +03:00
/* for compatibility: allocate a blank (T_NONE, classless) slot.
 * New code should use the typed allocators above. */
VALUE
rb_newobj(void)
{
    return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
}
2012-10-20 10:57:51 +04:00
/*
 * Allocate an object of `klass` with the given flags.  FL_WB_PROTECTED
 * in `flags` is stripped and translated into the wb_protected argument,
 * since protection is tracked in bitmaps rather than object flags.
 */
VALUE
rb_newobj_of(VALUE klass, VALUE flags)
{
    return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
}
2017-11-04 17:32:48 +03:00
# define UNEXPECTED_NODE(func) \
rb_bug ( # func " (): GC does not handle T_NODE 0x%x(%p) 0x% " PRIxVALUE , \
BUILTIN_TYPE ( obj ) , ( void * ) ( obj ) , RBASIC ( obj ) - > flags )
2015-03-28 02:51:09 +03:00
# undef rb_imemo_new
2015-03-11 13:36:17 +03:00
VALUE
rb_imemo_new ( enum imemo_type type , VALUE v1 , VALUE v2 , VALUE v3 , VALUE v0 )
{
2015-10-29 10:43:48 +03:00
VALUE flags = T_IMEMO | ( type < < FL_USHIFT ) ;
2015-10-29 10:26:44 +03:00
return newobj_of ( v0 , flags , v1 , v2 , v3 , TRUE ) ;
2015-03-11 13:36:17 +03:00
}
2018-05-09 10:08:53 +03:00
static VALUE
2018-05-09 10:11:59 +03:00
rb_imemo_tmpbuf_new ( VALUE v1 , VALUE v2 , VALUE v3 , VALUE v0 )
2017-10-26 11:45:14 +03:00
{
2018-05-09 10:11:59 +03:00
VALUE flags = T_IMEMO | ( imemo_tmpbuf < < FL_USHIFT ) ;
2018-05-09 10:08:53 +03:00
return newobj_of ( v0 , flags , v1 , v2 , v3 , FALSE ) ;
}
VALUE
2018-05-09 10:11:59 +03:00
rb_imemo_tmpbuf_auto_free_pointer ( void * buf )
2018-05-09 10:08:53 +03:00
{
2018-05-09 10:11:59 +03:00
return rb_imemo_new ( imemo_tmpbuf , ( VALUE ) buf , 0 , 0 , 0 ) ;
2018-05-09 10:08:53 +03:00
}
VALUE
2018-05-09 10:11:59 +03:00
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer ( void * buf , size_t cnt )
2018-05-09 10:08:53 +03:00
{
2018-05-09 10:11:59 +03:00
return rb_imemo_tmpbuf_new ( ( VALUE ) buf , 0 , ( VALUE ) cnt , 0 ) ;
2018-05-09 10:08:53 +03:00
}
2018-05-09 10:11:59 +03:00
rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_parser_heap ( void * buf , rb_imemo_tmpbuf_t * old_heap , size_t cnt )
2018-05-09 10:08:53 +03:00
{
2018-05-09 10:11:59 +03:00
return ( rb_imemo_tmpbuf_t * ) rb_imemo_tmpbuf_new ( ( VALUE ) buf , ( VALUE ) old_heap , ( VALUE ) cnt , 0 ) ;
2017-10-26 11:45:14 +03:00
}
2015-03-18 22:57:53 +03:00
#if IMEMO_DEBUG
/* Debug variant of rb_imemo_new(): logs every imemo allocation with its
 * type and the allocation site (file:line). */
VALUE
rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
{
    VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
    return memo;
}
#endif
2012-08-05 14:39:37 +04:00
VALUE
2015-05-16 15:17:14 +03:00
rb_data_object_wrap ( VALUE klass , void * datap , RUBY_DATA_FUNC dmark , RUBY_DATA_FUNC dfree )
2011-05-22 13:26:46 +04:00
{
2013-05-13 10:25:33 +04:00
if ( klass ) Check_Type ( klass , T_CLASS ) ;
2015-10-29 10:26:44 +03:00
return newobj_of ( klass , T_DATA , ( VALUE ) dmark , ( VALUE ) dfree , ( VALUE ) datap , FALSE ) ;
2008-06-08 14:27:06 +04:00
}
2015-05-19 05:18:49 +03:00
# undef rb_data_object_alloc
RUBY_ALIAS_FUNCTION ( rb_data_object_alloc ( VALUE klass , void * datap ,
RUBY_DATA_FUNC dmark , RUBY_DATA_FUNC dfree ) ,
2015-12-15 17:20:27 +03:00
rb_data_object_wrap , ( klass , datap , dmark , dfree ) )
2015-05-19 05:18:49 +03:00
2015-05-10 18:20:35 +03:00
/*
 * Like rb_data_object_wrap(), but allocates and zero-fills `size` bytes
 * of payload itself.  The payload is owned by the Data object and freed
 * by `dfree`.
 */
VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
2012-08-05 14:39:37 +04:00
VALUE
2015-05-16 15:17:14 +03:00
rb_data_typed_object_wrap ( VALUE klass , void * datap , const rb_data_type_t * type )
2008-06-08 14:27:06 +04:00
{
2012-08-05 14:39:37 +04:00
if ( klass ) Check_Type ( klass , T_CLASS ) ;
2015-10-29 10:26:44 +03:00
return newobj_of ( klass , T_DATA , ( VALUE ) type , ( VALUE ) 1 , ( VALUE ) datap , type - > flags & RUBY_FL_WB_PROTECTED ) ;
2008-04-27 07:20:35 +04:00
}
2015-05-19 05:18:49 +03:00
# undef rb_data_typed_object_alloc
RUBY_ALIAS_FUNCTION ( rb_data_typed_object_alloc ( VALUE klass , void * datap ,
const rb_data_type_t * type ) ,
2015-12-15 17:20:27 +03:00
rb_data_typed_object_wrap , ( klass , datap , type ) )
2015-05-19 05:18:49 +03:00
2015-05-10 18:20:35 +03:00
/*
 * Like rb_data_typed_object_wrap(), but allocates and zero-fills
 * `size` bytes of payload itself.
 */
VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
2012-08-05 14:39:37 +04:00
/*
 * Report the extension-declared memory footprint of a TypedData object
 * by calling its dsize() callback.  Returns 0 for untyped Data, a NULL
 * payload, or a type with no dsize function.
 */
size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
	const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
	const void *ptr = RTYPEDDATA_DATA(obj);
	if (ptr && type->function.dsize) {
	    return type->function.dsize(ptr);
	}
    }
    return 0;
}
2012-08-05 14:39:37 +04:00
/*
 * Return the wrap_struct_name of a TypedData object, or NULL when the
 * object is plain (untyped) Data.
 */
const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
	return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
	return 0;
    }
}
2016-05-08 20:44:51 +03:00
PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
/*
 * Conservative-marking predicate: does `ptr` point at a live RVALUE
 * slot inside one of our heap pages?
 *
 * Cheap rejections first (outside the global lo/hi address range, or
 * not RVALUE-aligned), then a binary search over heap_pages_sorted,
 * which is kept ordered by page start address.
 */
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct heap_page *page;
    register size_t hi, lo, mid;

    if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;

    /* check if p looks like a pointer using bsearch */
    lo = 0;
    hi = heap_allocated_pages;
    while (lo < hi) {
	mid = (lo + hi) / 2;
	page = heap_pages_sorted[mid];
	if (page->start <= p) {
	    if (p < page->start + page->total_slots) {
		return TRUE;
	    }
	    lo = mid + 1;
	}
	else {
	    hi = mid;
	}
    }
    return FALSE;
}
use id_table for constant tables
valgrind 3.9.0 on x86-64 reports a minor reduction in memory usage
when loading only RubyGems and RDoc by running: ruby -rrdoc -eexit
before: HEAP SUMMARY:
in use at exit: 2,913,448 bytes in 27,394 blocks
total heap usage: 48,362 allocs, 20,968 frees, 9,034,621 bytes alloc
after: HEAP SUMMARY:
in use at exit: 2,880,056 bytes in 26,712 blocks
total heap usage: 47,791 allocs, 21,079 frees, 9,046,507 bytes alloc
* class.c (struct clone_const_arg): adjust for id_table
(clone_const): ditto
(clone_const_i): ditto
(rb_mod_init_copy): ditto
(rb_singleton_class_clone_and_attach): ditto
(rb_include_class_new): ditto
(include_modules_at): ditto
* constant.h (rb_free_const_table): ditto
* gc.c (free_const_entry_i): ditto
(rb_free_const_table): ditto
(obj_memsize_of): ditto
(mark_const_entry_i): ditto
(mark_const_tbl): ditto
* internal.h (struct rb_classext_struct): ditto
* object.c (rb_mod_const_set): resolve class name on assignment
* variable.c (const_update): replace with const_tbl_update
(const_tbl_update): new function
(fc_i): adjust for id_table
(find_class_path): ditto
(autoload_const_set): st_update => const_tbl_update
(rb_const_remove): adjust for id_table
(sv_i): ditto
(rb_local_constants_i): ditto
(rb_local_constants): ditto
(rb_mod_const_at): ditto
(rb_mod_const_set): ditto
(rb_const_lookup): ditto
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53376 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-12-29 23:19:14 +03:00
/*
 * id_table iterator callback: free one rb_const_entry_t.  Used by
 * rb_free_const_table() below; always continues the iteration.
 */
static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}
2000-02-08 11:54:01 +03:00
void
use id_table for constant tables
valgrind 3.9.0 on x86-64 reports a minor reduction in memory usage
when loading only RubyGems and RDoc by running: ruby -rrdoc -eexit
before: HEAP SUMMARY:
in use at exit: 2,913,448 bytes in 27,394 blocks
total heap usage: 48,362 allocs, 20,968 frees, 9,034,621 bytes alloc
after: HEAP SUMMARY:
in use at exit: 2,880,056 bytes in 26,712 blocks
total heap usage: 47,791 allocs, 21,079 frees, 9,046,507 bytes alloc
* class.c (struct clone_const_arg): adjust for id_table
(clone_const): ditto
(clone_const_i): ditto
(rb_mod_init_copy): ditto
(rb_singleton_class_clone_and_attach): ditto
(rb_include_class_new): ditto
(include_modules_at): ditto
* constant.h (rb_free_const_table): ditto
* gc.c (free_const_entry_i): ditto
(rb_free_const_table): ditto
(obj_memsize_of): ditto
(mark_const_entry_i): ditto
(mark_const_tbl): ditto
* internal.h (struct rb_classext_struct): ditto
* object.c (rb_mod_const_set): resolve class name on assignment
* variable.c (const_update): replace with const_tbl_update
(const_tbl_update): new function
(fc_i): adjust for id_table
(find_class_path): ditto
(autoload_const_set): st_update => const_tbl_update
(rb_const_remove): adjust for id_table
(sv_i): ditto
(rb_local_constants_i): ditto
(rb_local_constants): ditto
(rb_mod_const_at): ditto
(rb_mod_const_set): ditto
(rb_const_lookup): ditto
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53376 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-12-29 23:19:14 +03:00
rb_free_const_table ( struct rb_id_table * tbl )
2000-02-08 11:54:01 +03:00
{
use id_table for constant tables
valgrind 3.9.0 on x86-64 reports a minor reduction in memory usage
when loading only RubyGems and RDoc by running: ruby -rrdoc -eexit
before: HEAP SUMMARY:
in use at exit: 2,913,448 bytes in 27,394 blocks
total heap usage: 48,362 allocs, 20,968 frees, 9,034,621 bytes alloc
after: HEAP SUMMARY:
in use at exit: 2,880,056 bytes in 26,712 blocks
total heap usage: 47,791 allocs, 21,079 frees, 9,046,507 bytes alloc
* class.c (struct clone_const_arg): adjust for id_table
(clone_const): ditto
(clone_const_i): ditto
(rb_mod_init_copy): ditto
(rb_singleton_class_clone_and_attach): ditto
(rb_include_class_new): ditto
(include_modules_at): ditto
* constant.h (rb_free_const_table): ditto
* gc.c (free_const_entry_i): ditto
(rb_free_const_table): ditto
(obj_memsize_of): ditto
(mark_const_entry_i): ditto
(mark_const_tbl): ditto
* internal.h (struct rb_classext_struct): ditto
* object.c (rb_mod_const_set): resolve class name on assignment
* variable.c (const_update): replace with const_tbl_update
(const_tbl_update): new function
(fc_i): adjust for id_table
(find_class_path): ditto
(autoload_const_set): st_update => const_tbl_update
(rb_const_remove): adjust for id_table
(sv_i): ditto
(rb_local_constants_i): ditto
(rb_local_constants): ditto
(rb_mod_const_at): ditto
(rb_mod_const_set): ditto
(rb_const_lookup): ditto
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53376 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-12-29 23:19:14 +03:00
rb_id_table_foreach_values ( tbl , free_const_entry_i , 0 ) ;
rb_id_table_free ( tbl ) ;
2012-08-05 14:39:37 +04:00
}
2000-02-08 11:54:01 +03:00
2012-08-05 14:39:37 +04:00
static inline void
2014-06-04 17:33:20 +04:00
make_zombie ( rb_objspace_t * objspace , VALUE obj , void ( * dfree ) ( void * ) , void * data )
2008-04-25 13:03:32 +04:00
{
2014-06-04 17:33:20 +04:00
struct RZombie * zombie = RZOMBIE ( obj ) ;
zombie - > basic . flags = T_ZOMBIE ;
zombie - > dfree = dfree ;
zombie - > data = data ;
zombie - > next = heap_pages_deferred_final ;
heap_pages_deferred_final = ( VALUE ) zombie ;
2011-06-11 14:51:51 +04:00
}
2012-08-05 14:39:37 +04:00
static inline void
2014-06-04 17:33:20 +04:00
make_io_zombie ( rb_objspace_t * objspace , VALUE obj )
2011-06-11 14:51:51 +04:00
{
2014-06-04 17:33:20 +04:00
rb_io_t * fptr = RANY ( obj ) - > as . file . fptr ;
make_zombie ( objspace , obj , ( void ( * ) ( void * ) ) rb_io_fptr_finalize , fptr ) ;
2008-04-25 13:03:32 +04:00
}
2012-08-05 14:39:37 +04:00
/*
 * Release all resources owned by `obj` during sweeping.
 *
 * Returns 0 when the slot can be reused immediately, and 1 when the
 * object became a zombie (deferred finalization) and the slot must NOT
 * be reused yet.
 */
static int
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_free);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);

    /* immediates must never reach the sweeper */
    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
    }

    /* drop generic instance variables stored outside the object */
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

#if USE_RGENGC
    if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);

#if RGENGC_CHECK_MODE
    /* a freed object must not carry any RGenGC state */
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
        CHECK(RVALUE_WB_UNPROTECTED);
        CHECK(RVALUE_MARKED);
        CHECK(RVALUE_MARKING);
        CHECK(RVALUE_UNCOLLECTIBLE);
#undef CHECK
#endif
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if ((RANY(obj)->as.basic.flags & ROBJECT_EMBED) ||
            RANY(obj)->as.object.as.heap.ivptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        else if (ROBJ_TRANSIENT_P(obj)) {
            /* ivptr lives in the transient heap; nothing to free here */
            RB_DEBUG_COUNTER_INC(obj_obj_transient);
        }
        else {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
            RB_DEBUG_COUNTER_INC(obj_obj_ptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        mjit_remove_class_serial(RCLASS_SERIAL(obj));
        rb_id_table_free(RCLASS_M_TBL(obj));
        if (RCLASS_IV_TBL(obj)) {
            st_free_table(RCLASS_IV_TBL(obj));
        }
        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_IV_INDEX_TBL(obj)) {
            st_free_table(RCLASS_IV_INDEX_TBL(obj));
        }
        if (RCLASS_EXT(obj)->subclasses) {
            if (BUILTIN_TYPE(obj) == T_MODULE) {
                rb_class_detach_module_subclasses(obj);
            }
            else {
                rb_class_detach_subclasses(obj);
            }
            RCLASS_EXT(obj)->subclasses = NULL;
        }
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        if (RANY(obj)->as.klass.ptr)
            xfree(RANY(obj)->as.klass.ptr);
        RANY(obj)->as.klass.ptr = NULL;

        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        /* size-distribution statistics, debug builds only */
        if (RHASH_SIZE(obj) >= 8) {
            RB_DEBUG_COUNTER_INC(obj_hash_ge8);
        }
        else if (RHASH_SIZE(obj) >= 4) {
            RB_DEBUG_COUNTER_INC(obj_hash_ge4);
        }
        else if (RHASH_SIZE(obj) >= 1) {
            RB_DEBUG_COUNTER_INC(obj_hash_under4);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            RB_DEBUG_COUNTER_INC(obj_hash_ar);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif
        if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
            struct ar_table_struct *tab = RHASH(obj)->as.ar;

            if (tab) {
                if (RHASH_TRANSIENT_P(obj)) {
                    /* transient-heap storage is reclaimed elsewhere */
                    RB_DEBUG_COUNTER_INC(obj_hash_transient);
                }
                else {
                    ruby_xfree(tab);
                }
            }
        }
        else {
            GC_ASSERT(RHASH_ST_TABLE_P(obj));
            st_free_table(RHASH(obj)->as.st);
        }
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (DATA_PTR(obj)) {
            int free_immediately = FALSE;
            void (*dfree)(void *);
            void *data = DATA_PTR(obj);

            if (RTYPEDDATA_P(obj)) {
                free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
                dfree = RANY(obj)->as.typeddata.type->function.dfree;
                if (0 && free_immediately == 0) {
                    /* to expose non-free-immediate T_DATA */
                    fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
                }
            }
            else {
                dfree = RANY(obj)->as.data.dfree;
            }

            if (dfree) {
                if (dfree == RUBY_DEFAULT_FREE) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
                else if (free_immediately) {
                    (*dfree)(data);
                    RB_DEBUG_COUNTER_INC(obj_data_imm_free);
                }
                else {
                    /* dfree may call arbitrary code: defer via zombie */
                    make_zombie(objspace, obj, dfree, data);
                    RB_DEBUG_COUNTER_INC(obj_data_zombie);
                    return 1;
                }
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_data_empty);
            }
        }
        break;
      case T_MATCH:
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
            onig_region_free(&rm->regs, 0);
            if (rm->char_offset)
                xfree(rm->char_offset);
            xfree(rm);
            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return 1;
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_ICLASS:
        /* Basically, T_ICLASS shares table with the module */
        if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
            rb_id_table_free(RCLASS_M_TBL(obj));
        }
        if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
            rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
        }
        if (RCLASS_EXT(obj)->subclasses) {
            rb_class_detach_subclasses(obj);
            RCLASS_EXT(obj)->subclasses = NULL;
        }
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        xfree(RANY(obj)->as.klass.ptr);
        RANY(obj)->as.klass.ptr = NULL;
        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        break;

      case T_NODE:
        /* AST nodes are no longer GC-managed objects */
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else if (RSTRUCT_TRANSIENT_P(obj)) {
            RB_DEBUG_COUNTER_INC(obj_struct_transient);
        }
        else {
            xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        {
            rb_gc_free_dsymbol(obj);
            RB_DEBUG_COUNTER_INC(obj_symbol);
        }
        break;

      case T_IMEMO:
        switch (imemo_type(obj)) {
          case imemo_ment:
            rb_free_method_entry(&RANY(obj)->as.imemo.ment);
            RB_DEBUG_COUNTER_INC(obj_imemo_ment);
            break;
          case imemo_iseq:
            rb_iseq_free(&RANY(obj)->as.imemo.iseq);
            RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
            break;
          case imemo_env:
            GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
            xfree((VALUE *)RANY(obj)->as.imemo.env.env);
            RB_DEBUG_COUNTER_INC(obj_imemo_env);
            break;
          case imemo_tmpbuf:
            xfree(RANY(obj)->as.imemo.alloc.ptr);
            RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
            break;
          case imemo_ast:
            rb_ast_free(&RANY(obj)->as.imemo.ast);
            RB_DEBUG_COUNTER_INC(obj_imemo_ast);
            break;
          /* the remaining imemo kinds own no external memory */
          case imemo_cref:
            RB_DEBUG_COUNTER_INC(obj_imemo_cref);
            break;
          case imemo_svar:
            RB_DEBUG_COUNTER_INC(obj_imemo_svar);
            break;
          case imemo_throw_data:
            RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
            break;
          case imemo_ifunc:
            RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
            break;
          case imemo_memo:
            RB_DEBUG_COUNTER_INC(obj_imemo_memo);
            break;
          case imemo_parser_strterm:
            RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
            break;
          default:
            /* unreachable */
            break;
        }
        return 0;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void *)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST(obj, FL_FINALIZE)) {
        /* finalizers registered via ObjectSpace.define_finalizer run later */
        make_zombie(objspace, obj, 0, 0);
        return 1;
    }
    else {
        return 0;
    }
}
2012-08-05 14:39:37 +04:00
void
Init_heap ( void )
2010-11-03 04:01:12 +03:00
{
2013-10-22 14:54:44 +04:00
rb_objspace_t * objspace = & rb_objspace ;
2014-09-09 08:56:55 +04:00
gc_stress_set ( objspace , ruby_initial_gc_stress ) ;
2013-12-06 13:38:22 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
objspace - > rgengc . oldmalloc_increase_limit = gc_params . oldmalloc_limit_min ;
# endif
2016-01-09 01:15:40 +03:00
heap_add_pages ( objspace , heap_eden , gc_params . heap_init_slots / HEAP_PAGE_OBJ_LIMIT ) ;
2013-10-22 14:54:44 +04:00
init_mark_stack ( & objspace - > mark_stack ) ;
objspace - > profile . invoke_time = getrusage_time ( ) ;
finalizer_table = st_init_numtable ( ) ;
2010-11-03 04:01:12 +03:00
}
2012-08-05 14:39:37 +04:00
/* Callback signature for object-space iteration:
 * (vstart, vend, stride, user data) -> non-zero to stop iteration. */
typedef int each_obj_callback(void *, void *, size_t, void *);

/* Bundles a callback and its user data so they can be passed through
 * rb_ensure() as a single VALUE argument. */
struct each_obj_args {
    each_obj_callback *callback;
    void *data;
};
/*
 * Core loop behind rb_objspace_each_objects(): walk every heap page in
 * address order and invoke the callback with the page's slot range.
 *
 * The page index `i` is re-synchronized against heap_pages_sorted on
 * every iteration because the callback may allocate or free pages,
 * shifting entries in the sorted array.  `pstart` (the last page start
 * visited) anchors the search.
 */
static VALUE
objspace_each_objects(VALUE arg)
{
    size_t i;
    struct heap_page *page;
    RVALUE *pstart = NULL, *pend;
    rb_objspace_t *objspace = &rb_objspace;
    struct each_obj_args *args = (struct each_obj_args *)arg;

    i = 0;
    while (i < heap_allocated_pages) {
        /* re-locate the first page whose start is above pstart */
        while (0 < i && pstart < heap_pages_sorted[i-1]->start) i--;
        while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
        if (heap_allocated_pages <= i) break;

        page = heap_pages_sorted[i];

        pstart = page->start;
        pend = pstart + page->total_slots;

        if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
            break;
        }
    }

    return Qnil;
}
2014-09-08 08:11:00 +04:00
static VALUE
incremental_enable ( void )
{
rb_objspace_t * objspace = & rb_objspace ;
objspace - > flags . dont_incremental = FALSE ;
return Qnil ;
}
2012-08-05 14:39:37 +04:00
/*
 * rb_objspace_each_objects() is a special C API to walk through the
 * Ruby object space.  This C API is too difficult to use.
 * To be frank, you should not use it, or you need to read the
 * source code of this function and understand what this function does.
 *
 * 'callback' will be called several times (the number of heap pages,
 * at the current implementation) with:
 *   vstart: a pointer to the first living object of the heap_page.
 *   vend:   a pointer to next to the valid heap_page area.
 *   stride: a distance to the next VALUE.
 *
 * If callback() returns non-zero, the iteration will be stopped.
 *
 * This is a sample callback code to iterate living objects:
 *
 *   int
 *   sample_callback(void *vstart, void *vend, int stride, void *data) {
 *     VALUE v = (VALUE)vstart;
 *     for (; v != (VALUE)vend; v += stride) {
 *       if (RBASIC(v)->flags) { // liveness check
 *         // do something with live object 'v'
 *       }
 *     }
 *     return 0; // continue to iteration
 *   }
 *
 * Note: 'vstart' is not the top of a heap_page.  It points to the first
 *       living object, to grasp at least one object and avoid GC issues.
 *       This means that you can not walk through all Ruby object pages
 *       including freed object pages.
 *
 * Note: On this implementation, 'stride' is the same as sizeof(RVALUE).
 *       However, there are possibilities to pass variable values with
 *       'stride' for some reasons.  You must use 'stride' instead of
 *       some constant value in the iteration.
 */
/*
 * Public entry point for heap iteration (see the comment above).
 * Finishes any pending GC work and disables incremental GC for the
 * duration of the walk so pages stay stable; the previous setting is
 * restored via rb_ensure() even if the callback raises.
 */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    struct each_obj_args args;
    rb_objspace_t *objspace = &rb_objspace;
    int prev_dont_incremental = objspace->flags.dont_incremental;

    gc_rest(objspace);
    objspace->flags.dont_incremental = TRUE;

    args.callback = callback;
    args.data = data;

    if (prev_dont_incremental) {
        /* already disabled by the caller; nothing to restore */
        objspace_each_objects((VALUE)&args);
    }
    else {
        rb_ensure(objspace_each_objects, (VALUE)&args, incremental_enable, Qnil);
    }
}
2014-04-02 15:59:50 +04:00
/*
 * Variant of rb_objspace_each_objects() that skips gc_rest() and the
 * incremental-GC toggling; the caller is responsible for ensuring the
 * heap is in a safe state.
 */
void
rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
{
    struct each_obj_args args;

    args.callback = callback;
    args.data = data;
    objspace_each_objects((VALUE)&args);
}
2012-08-05 14:39:37 +04:00
/* Iteration state for ObjectSpace.each_object: yielded-object count
 * and the optional class/module filter (0 when unfiltered). */
struct os_each_struct {
    size_t num;
    VALUE of;
};
2009-06-17 01:36:50 +04:00
2012-10-05 12:14:09 +04:00
/*
 * Return non-zero if `obj` is an interpreter-internal object that must
 * be hidden from ObjectSpace.each_object (free slots, imemo, iclass,
 * zombies, hidden objects with a NULL klass, and internal singleton
 * classes).  Temporarily unpoisons the slot for ASan builds and
 * re-poisons it before returning when it was poisoned or unused.
 */
static int
internal_object_p(VALUE obj)
{
    RVALUE *p = (RVALUE *)obj;
    void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
    unpoison_object(obj, false);
    bool used_p = p->as.basic.flags;  /* zero flags == free slot */

    if (used_p) {
        switch (BUILTIN_TYPE(p)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (!p->as.basic.klass) break;
            if (FL_TEST(obj, FL_SINGLETON)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!p->as.basic.klass) break;  /* hidden object */
            return 0;
        }
    }
    if (ptr || !used_p) {
        /* restore ASan poisoning for slots we should not have touched */
        poison_object(obj);
    }
    return 1;
}
2012-10-24 04:04:56 +04:00
/* Exported wrapper around internal_object_p() for other core files. */
int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}
2012-08-05 14:39:37 +04:00
static int
os_obj_of_i ( void * vstart , void * vend , size_t stride , void * data )
2009-06-17 01:36:50 +04:00
{
2012-08-05 14:39:37 +04:00
struct os_each_struct * oes = ( struct os_each_struct * ) data ;
RVALUE * p = ( RVALUE * ) vstart , * pend = ( RVALUE * ) vend ;
2009-06-17 01:36:50 +04:00
2012-08-05 14:39:37 +04:00
for ( ; p ! = pend ; p + + ) {
2012-10-05 12:14:09 +04:00
volatile VALUE v = ( VALUE ) p ;
if ( ! internal_object_p ( v ) ) {
if ( ! oes - > of | | rb_obj_is_kind_of ( v , oes - > of ) ) {
rb_yield ( v ) ;
oes - > num + + ;
2012-08-05 14:39:37 +04:00
}
}
}
2007-06-14 12:35:20 +04:00
2012-08-05 14:39:37 +04:00
return 0 ;
}
2004-12-13 12:57:41 +03:00
2012-08-05 14:39:37 +04:00
/* Drive the heap walk for ObjectSpace.each_object and return the
 * number of objects yielded, as a Ruby Integer. */
static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}
2001-11-19 08:03:03 +03:00
2012-08-05 14:39:37 +04:00
/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *     a = 102.7
 *     b = 95       # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     12345678987654321
 *     102.7
 *     2.71828182845905
 *     3.14159265358979
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
 *     Total count: 7
 *
 */
2003-04-09 10:44:34 +04:00
2012-08-05 14:39:37 +04:00
/* Method body for ObjectSpace.each_object (rdoc above): accept the
 * optional filter module, return an Enumerator when no block given. */
static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}
2012-08-05 14:39:37 +04:00
/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */
2011-07-07 18:59:09 +04:00
2012-08-05 14:39:37 +04:00
/* Method body for ObjectSpace.undefine_finalizer (rdoc above). */
static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}
2001-11-13 11:19:52 +03:00
2012-08-05 14:39:37 +04:00
VALUE
2013-11-10 03:03:11 +04:00
rb_undefine_finalizer ( VALUE obj )
2001-11-13 11:19:52 +03:00
{
2012-08-05 14:39:37 +04:00
rb_objspace_t * objspace = & rb_objspace ;
st_data_t data = obj ;
rb_check_frozen ( obj ) ;
st_delete ( finalizer_table , & data , 0 ) ;
FL_UNSET ( obj , FL_FINALIZE ) ;
return obj ;
2001-11-13 11:19:52 +03:00
}
2013-11-09 19:34:30 +04:00
/* Raise ArgumentError unless `block` responds to #call (including
 * private/protected #call, hence the TRUE argument). */
static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}
2013-12-10 09:17:19 +04:00
/* Raise ArgumentError for objects that cannot carry flags (immediates)
 * and FrozenError for frozen objects; otherwise a finalizer may be
 * attached to `obj`. */
static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}
2013-11-09 19:34:30 +04:00
2012-08-05 14:39:37 +04:00
/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 */
2003-04-09 10:44:34 +04:00
2012-08-05 14:39:37 +04:00
/* Method body for ObjectSpace.define_finalizer (rdoc above): validate
 * the target and the callback, defaulting to the given block, then
 * delegate registration to define_final0(). */
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    should_be_finalizable(obj);
    if (argc == 1) {
        block = rb_block_proc();
    }
    else {
        should_be_callable(block);
    }

    return define_final0(obj, block);
}
2012-08-05 14:39:37 +04:00
/*
 * Registers +block+ as a finalizer for +obj+ in finalizer_table.
 * The entry stored per finalizer is a frozen 2-element array
 * [current safe level, block]; run_single_final() later reads it back
 * in that order.  If an equal block (compared with #==) is already
 * registered for +obj+, the existing entry is returned instead of
 * adding a duplicate.  Returns the stored pair (or the existing one).
 */
static VALUE
define_final0(VALUE obj, VALUE block)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    /* pair the block with the safe level it was registered under */
    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
    OBJ_FREEZE(block);

    if (st_lookup(finalizer_table, obj, &data)) {
	table = (VALUE)data;

	/* avoid duplicate block, table is usually small */
	{
	    long len = RARRAY_LEN(table);
	    long i;

	    for (i = 0; i < len; i++) {
		VALUE recv = RARRAY_AREF(table, i);
		if (rb_funcall(recv, idEq, 1, block)) {
		    return recv;
		}
	    }
	}

	rb_ary_push(table, block);
    }
    else {
	table = rb_ary_new3(1, block);
	/* hide the table from Ruby code by clearing its class */
	RBASIC_CLEAR_CLASS(table);
	st_add_direct(finalizer_table, obj, table);
    }
    return block;
}
2012-08-05 14:39:37 +04:00
/*
 * Public entry point for registering a finalizer: checks the
 * preconditions on +obj+ and +block+ (see should_be_finalizable and
 * should_be_callable), then delegates to define_final0().
 */
VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);
}
2012-08-05 14:39:37 +04:00
/*
 * Propagates finalizers from +obj+ to its copy +dest+ (used when an
 * object with FL_FINALIZE is duplicated).  The finalizer array itself
 * is shared between the two table entries; FL_FINALIZE is set on
 * +dest+ regardless of whether a table entry was found.
 */
void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    st_data_t entry;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    if (st_lookup(finalizer_table, obj, &entry)) {
	st_insert(finalizer_table, dest, entry);
    }
    FL_SET(dest, FL_FINALIZE);
}
2012-08-05 14:39:37 +04:00
/*
 * Invokes one finalizer entry +final+ — the frozen [safe_level, cmd]
 * pair built by define_final0() — passing +objid+ as the argument.
 * The recorded safe level is forced first; a tainted callable is run
 * at RUBY_SAFE_LEVEL_MAX instead.  Uses rb_check_funcall, so a cmd
 * without #call is silently skipped rather than raising NoMethodError.
 */
static VALUE
run_single_final(VALUE final, VALUE objid)
{
    const VALUE cmd = RARRAY_AREF(final, 1);
    const int level = OBJ_TAINTED(cmd) ?
	RUBY_SAFE_LEVEL_MAX : FIX2INT(RARRAY_AREF(final, 0));

    rb_set_safe_level_force(level);
    return rb_check_funcall(cmd, idCall, 1, &objid);
}
2012-08-05 14:39:37 +04:00
/*
 * Runs every finalizer registered for +obj+ in +table+ (an array of
 * [safe_level, callable] pairs).  The caller's control frame, safe
 * level and errinfo are saved up front and restored before each
 * iteration, so a finalizer that raises cannot corrupt VM state: the
 * raise lands at EC_EXEC_TAG(), saved.finished is bumped to skip the
 * failed entry, and the loop resumes with the next one.
 */
static void
run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
{
    long i;
    enum ruby_tag_type state;
    /* volatile: must survive the longjmp performed by a raising finalizer */
    volatile struct {
	VALUE errinfo;
	VALUE objid;
	rb_control_frame_t *cfp;
	long finished;
	int safe;
    } saved;
    rb_execution_context_t * volatile ec = GET_EC();

#define RESTORE_FINALIZER() (\
	ec->cfp = saved.cfp, \
	rb_set_safe_level_force(saved.safe), \
	rb_set_errinfo(saved.errinfo))

    saved.safe = rb_safe_level();
    saved.errinfo = rb_errinfo();
    saved.objid = nonspecial_obj_id(obj);
    saved.cfp = ec->cfp;
    saved.finished = 0;

    EC_PUSH_TAG(ec);
    state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
	++saved.finished;	/* skip failed finalizer */
    }
    for (i = saved.finished;
	 RESTORE_FINALIZER(), i < RARRAY_LEN(table);
	 saved.finished = ++i) {
	run_single_final(RARRAY_AREF(table, i), saved.objid);
    }
    EC_POP_TAG();
#undef RESTORE_FINALIZER
}
2005-10-18 21:35:18 +04:00
static void
2014-06-04 17:33:20 +04:00
run_final ( rb_objspace_t * objspace , VALUE zombie )
1998-01-16 15:13:05 +03:00
{
2012-08-05 14:39:37 +04:00
st_data_t key , table ;
2014-06-04 17:33:20 +04:00
if ( RZOMBIE ( zombie ) - > dfree ) {
RZOMBIE ( zombie ) - > dfree ( RZOMBIE ( zombie ) - > data ) ;
2012-08-05 14:39:37 +04:00
}
2014-06-04 17:33:20 +04:00
key = ( st_data_t ) zombie ;
2012-08-05 14:39:37 +04:00
if ( st_delete ( finalizer_table , & key , & table ) ) {
2014-06-04 17:33:20 +04:00
run_finalizer ( objspace , zombie , ( VALUE ) table ) ;
2012-08-05 14:39:37 +04:00
}
2003-11-28 17:23:33 +03:00
}
2012-08-05 14:39:37 +04:00
/*
 * Walks the linked list of zombie objects starting at +zombie+,
 * finalizing each one and returning its slot to the owning heap
 * page's freelist.  Updates the per-page and global finalization
 * counters and the total-freed profile counter as it goes.
 */
static void
finalize_list(rb_objspace_t *objspace, VALUE zombie)
{
    while (zombie) {
	VALUE next_zombie;
	struct heap_page *page;
	unpoison_object(zombie, false); /* make the slot accessible again (memory poisoning) */
	next_zombie = RZOMBIE(zombie)->next; /* read before the slot is recycled */
	page = GET_HEAP_PAGE(zombie);

	run_final(objspace, zombie);

	RZOMBIE(zombie)->basic.flags = 0;
	if (LIKELY(heap_pages_final_slots)) heap_pages_final_slots--;
	page->final_slots--;
	page->free_slots++;
	heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);

	objspace->profile.total_freed_objects++;

	zombie = next_zombie;
    }
}
static void
2012-08-05 14:39:37 +04:00
finalize_deferred ( rb_objspace_t * objspace )
2007-09-26 23:40:49 +04:00
{
2014-06-04 17:33:20 +04:00
VALUE zombie ;
2012-08-05 14:39:37 +04:00
2015-08-06 04:44:03 +03:00
while ( ( zombie = ATOMIC_VALUE_EXCHANGE ( heap_pages_deferred_final , 0 ) ) ! = 0 ) {
2014-06-04 17:33:20 +04:00
finalize_list ( objspace , zombie ) ;
2012-08-05 14:39:37 +04:00
}
2007-09-26 23:40:49 +04:00
}
2013-05-27 01:30:44 +04:00
/*
 * Postponed-job entry point that drains deferred finalizers.  The
 * atomic exchange on +finalizing+ makes the drain non-reentrant: a
 * second concurrent/recursive invocation returns immediately.
 */
static void
gc_finalize_deferred(void *dmy)
{
    rb_objspace_t *objspace = dmy;
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
    finalize_deferred(objspace);
    ATOMIC_SET(finalizing, 0);
}
2013-05-27 01:30:44 +04:00
/* TODO: to keep compatibility, maybe unused. */
void
rb_gc_finalize_deferred ( void )
{
gc_finalize_deferred ( 0 ) ;
}
/*
 * Schedules gc_finalize_deferred() to run as a postponed job (i.e.
 * outside the GC itself).  Registration failure is fatal: pending
 * finalizers would otherwise be silently dropped.
 */
static void
gc_finalize_deferred_register(rb_objspace_t *objspace)
{
    if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
	rb_bug("gc_finalize_deferred_register: can't register finalizer.");
    }
}
2012-08-05 14:39:37 +04:00
/* Node of a temporary singly-linked list used to snapshot the
 * contents of finalizer_table before forcing all finalizers to run. */
struct force_finalize_list {
    VALUE obj;
    VALUE table;
    struct force_finalize_list *next;
};

/* st_foreach callback: prepends one (object, finalizer table) pair to
 * the list whose head pointer is passed through +arg+. */
static int
force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
{
    struct force_finalize_list **prev = (struct force_finalize_list **)arg;
    struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
    curr->obj = key;
    curr->table = val;
    curr->next = *prev;
    *prev = curr;
    return ST_CONTINUE;
}
/*
 * Interpreter-shutdown hook: runs every pending and registered
 * finalizer for the global objspace.  In heavy-checking builds the
 * heap is consistency-verified first.
 */
void
rb_gc_call_finalizer_at_exit(void)
{
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(Qnil);
#endif
    rb_objspace_call_finalizer(&rb_objspace);
}
2009-07-15 18:59:41 +04:00
/*
 * Shutdown-time finalization.  In order:
 *   1. finish any in-progress GC and drain deferred zombies;
 *   2. repeatedly force every finalizer left in finalizer_table to
 *      run (a finalizer may register new ones, hence the outer loop);
 *   3. sweep all heap pages freeing the resources of remaining
 *      T_DATA/T_FILE objects.
 * Incremental GC, and later GC entirely, are disabled because forced
 * finalizers can leave the object graph inconsistent.  Guarded by the
 * +finalizing+ flag so only one caller proceeds.
 */
static void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
    RVALUE *p, *pend;
    size_t i;

    gc_rest(objspace);

    if (ATOMIC_EXCHANGE(finalizing, 1)) return;

    /* run finalizers */
    finalize_deferred(objspace);
    GC_ASSERT(heap_pages_deferred_final == 0);

    gc_rest(objspace);
    /* prohibit incremental GC */
    objspace->flags.dont_incremental = 1;

    /* force to run finalizer */
    while (finalizer_table->num_entries) {
	/* snapshot the table, then run+delete each entry */
	struct force_finalize_list *list = 0;
	st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
	while (list) {
	    struct force_finalize_list *curr = list;
	    st_data_t obj = (st_data_t)curr->obj;
	    run_finalizer(objspace, curr->obj, curr->table);
	    st_delete(finalizer_table, &obj, 0);
	    list = curr->next;
	    xfree(curr);
	}
    }

    /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
    dont_gc = 1;

    /* running data/file finalizers are part of garbage collection */
    gc_enter(objspace, "rb_objspace_call_finalizer");

    /* run data/file object's finalizers */
    for (i = 0; i < heap_allocated_pages; i++) {
	p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
	while (p < pend) {
	    void *poisoned = poisoned_object_p(p);
	    unpoison_object((VALUE)p, false);
	    switch (BUILTIN_TYPE(p)) {
	      case T_DATA:
		if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
		/* threads, mutexes and fibers are deliberately skipped here */
		if (rb_obj_is_thread((VALUE)p)) break;
		if (rb_obj_is_mutex((VALUE)p)) break;
		if (rb_obj_is_fiber((VALUE)p)) break;
		p->as.free.flags = 0;
		if (RTYPEDDATA_P(p)) {
		    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
		}
		if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
		    xfree(DATA_PTR(p));
		}
		else if (RANY(p)->as.data.dfree) {
		    /* defer: the dfree callback runs via the zombie path */
		    make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
		}
		break;
	      case T_FILE:
		if (RANY(p)->as.file.fptr) {
		    make_io_zombie(objspace, (VALUE)p);
		}
		break;
	    }
	    if (poisoned) {
		GC_ASSERT(BUILTIN_TYPE(p) == T_NONE);
		poison_object((VALUE)p); /* restore poisoning for untouched free slots */
	    }
	    p++;
	}
    }

    gc_exit(objspace, "rb_objspace_call_finalizer");

    /* the zombies created above still need their dfree run */
    if (heap_pages_deferred_final) {
	finalize_list(objspace, heap_pages_deferred_final);
    }

    st_free_table(finalizer_table);
    finalizer_table = 0;
    ATOMIC_SET(finalizing, 0);
}
2016-05-08 20:44:51 +03:00
PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr));
/*
 * TRUE if +ptr+ is a plausible object address for an object id:
 * it must point into the managed heap and carry a regular builtin
 * type (not past T_FIXNUM, and not an internal T_ICLASS).
 */
static inline int
is_id_value(rb_objspace_t *objspace, VALUE ptr)
{
    return is_pointer_to_heap(objspace, (void *)ptr) &&
	   BUILTIN_TYPE(ptr) <= T_FIXNUM &&
	   BUILTIN_TYPE(ptr) != T_ICLASS;
}
2012-08-05 14:39:37 +04:00
/*
 * TRUE if the page containing +ptr+ has already been swept in the
 * current lazy-sweep cycle (i.e. its before_sweep flag is clear).
 * +objspace+ and +heap+ are unused by the body; presumably kept for
 * signature symmetry with other heap predicates.
 */
static inline int
heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
{
    struct heap_page *page = GET_HEAP_PAGE(ptr);
    return page->flags.before_sweep ? FALSE : TRUE;
}
static inline int
2013-10-22 14:28:31 +04:00
is_swept_object ( rb_objspace_t * objspace , VALUE ptr )
2012-11-22 19:03:46 +04:00
{
2013-10-22 14:28:31 +04:00
if ( heap_is_swept_object ( objspace , heap_eden , ptr ) ) {
2012-11-22 19:03:46 +04:00
return TRUE ;
2013-10-22 14:28:31 +04:00
}
else {
return FALSE ;
}
}
2014-07-06 19:42:14 +04:00
/* garbage objects will be collected soon. */
2013-10-22 14:28:31 +04:00
static inline int
2014-07-06 19:42:14 +04:00
is_garbage_object ( rb_objspace_t * objspace , VALUE ptr )
2013-10-22 14:28:31 +04:00
{
2014-07-06 16:02:57 +04:00
if ( ! is_lazy_sweeping ( heap_eden ) | |
2014-07-06 16:33:12 +04:00
is_swept_object ( objspace , ptr ) | |
2014-07-06 16:02:57 +04:00
MARKED_IN_BITMAP ( GET_HEAP_MARK_BITS ( ptr ) , ptr ) ) {
return FALSE ;
}
else {
return TRUE ;
}
2010-10-26 21:27:32 +04:00
}
2012-08-05 14:39:37 +04:00
/*
 * TRUE if +ptr+ refers to a live object: not an empty/zombie slot and
 * not garbage awaiting lazy sweep.
 */
static inline int
is_live_object(rb_objspace_t *objspace, VALUE ptr)
{
    switch (BUILTIN_TYPE(ptr)) {
      case T_NONE:
      case T_ZOMBIE:
	return FALSE;
    }

    return is_garbage_object(objspace, ptr) ? FALSE : TRUE;
}
2013-07-18 03:19:38 +04:00
/*
 * TRUE if +obj+ occupies a heap slot the marker may visit; special
 * constants (Fixnum, Symbol, nil, ...) have no slot and return FALSE.
 * Also validates the slot via check_rvalue_consistency.
 */
static inline int
is_markable_object(rb_objspace_t *objspace, VALUE obj)
{
    if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
    check_rvalue_consistency(obj);
    return TRUE;
}
/*
 * Public predicate: TRUE if +obj+ is both markable (occupies a heap
 * slot) and currently live.  Markability is checked first so that
 * special constants never reach the slot inspection in
 * is_live_object().
 */
int
rb_objspace_markable_object_p(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!is_markable_object(objspace, obj)) return FALSE;
    return is_live_object(objspace, obj);
}
2014-07-06 16:02:57 +04:00
/*
 * Public predicate: TRUE if +obj+ is garbage awaiting collection by
 * the in-progress lazy sweep (see is_garbage_object).
 */
int
rb_objspace_garbage_object_p(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    return is_garbage_object(objspace, obj);
}
2012-08-05 14:39:37 +04:00
/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id)    ->  an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)  #=> "I am a string"
 *     r == s                                #=> true
 *
 */

static VALUE
id2ref(VALUE obj, VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;
    VALUE ptr;
    void *p0;

    ptr = NUM2PTR(objid);
    p0 = (void *)ptr;

    /* immediates encode themselves: id == value */
    if (ptr == Qtrue) return Qtrue;
    if (ptr == Qfalse) return Qfalse;
    if (ptr == Qnil) return Qnil;
    if (FIXNUM_P(ptr)) return (VALUE)ptr;
    if (FLONUM_P(ptr)) return (VALUE)ptr;

    ptr = obj_id_to_ref(objid);

    /* symbol ids sit at offset (4 << 2) modulo sizeof(RVALUE);
     * see the encoding table in rb_obj_id() */
    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
	ID symid = ptr / sizeof(RVALUE);
	if (rb_id2str(symid) == 0)
	    rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
	return ID2SYM(symid);
    }

    if (!is_id_value(objspace, ptr)) {
	rb_raise(rb_eRangeError, "%p is not id value", p0);
    }
    if (!is_live_object(objspace, ptr)) {
	rb_raise(rb_eRangeError, "%p is recycled object", p0);
    }
    /* klass == 0 marks hidden/internal objects; never hand them out */
    if (RBASIC(ptr)->klass == 0) {
	rb_raise(rb_eRangeError, "%p is internal object", p0);
    }
    return (VALUE)ptr;
}
2012-08-05 14:39:37 +04:00
/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note: that some objects of builtin classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *      Object.new.object_id  == Object.new.object_id  # => false
 *      (21 * 2).object_id    == (21 * 2).object_id    # => true
 *      "hello".object_id     == "hello".object_id     # => false
 *      "hi".freeze.object_id == "hi".freeze.object_id # => true
 */
VALUE
rb_obj_id(VALUE obj)
{
    /*
     *                32-bit VALUE space
     *          MSB ------------------------ LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol  ssssssssssssssssssssssss00001110
     *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
     *  fixnum  fffffffffffffffffffffffffffffff1
     *
     *                    object_id space
     *                                       LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol  000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
     *  object  oooooooooooooooooooooooooooooo0        o...o % A = 0
     *  fixnum  fffffffffffffffffffffffffffffff1       bignum if required
     *
     *  where A = sizeof(RVALUE)/4
     *
     *  sizeof(RVALUE) is
     *  20 if 32-bit, double is 4-byte aligned
     *  24 if 32-bit, double is 8-byte aligned
     *  40 if 64-bit
     */

    if (STATIC_SYM_P(obj)) {
	/* shift symbol id into the "S...S % A = 4" range shown above */
	return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
    }
    else if (FLONUM_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
	return LONG2NUM((SIGNED_VALUE)obj);
#else
	return LL2NUM((SIGNED_VALUE)obj);
#endif
    }
    else if (SPECIAL_CONST_P(obj)) {
	return LONG2NUM((SIGNED_VALUE)obj);
    }
    return nonspecial_obj_id(obj);
}
2013-11-05 08:51:01 +04:00
#include "regint.h"

/*
 * Returns the heap footprint of +obj+ in bytes: the RVALUE slot
 * itself plus any auxiliary buffers reachable from it (ivar arrays,
 * method/constant tables, string/array/bignum buffers, regexp and
 * match data, IO buffers, ...).  When +use_all_types+ is false,
 * T_DATA contributions (computed via rb_objspace_data_type_memsize)
 * are skipped.  Special constants occupy no slot and report 0.
 */
static size_t
obj_memsize_of(VALUE obj, int use_all_types)
{
    size_t size = 0;

    if (SPECIAL_CONST_P(obj)) {
	return 0;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
	/* generic (externally stored) instance variables */
	size += rb_generic_ivar_memsize(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
	if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
	    ROBJECT(obj)->as.heap.ivptr) {
	    size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
	}
	break;
      case T_MODULE:
      case T_CLASS:
	if (RCLASS_M_TBL(obj)) {
	    size += rb_id_table_memsize(RCLASS_M_TBL(obj));
	}
	if (RCLASS_EXT(obj)) {
	    if (RCLASS_IV_TBL(obj)) {
		size += st_memsize(RCLASS_IV_TBL(obj));
	    }
	    if (RCLASS_IV_INDEX_TBL(obj)) {
		size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
	    }
	    if (RCLASS(obj)->ptr->iv_tbl) {
		size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
	    }
	    if (RCLASS(obj)->ptr->const_tbl) {
		size += rb_id_table_memsize(RCLASS(obj)->ptr->const_tbl);
	    }
	    size += sizeof(rb_classext_t);
	}
	break;
      case T_ICLASS:
	/* only the origin iclass owns its method table */
	if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
	    if (RCLASS_M_TBL(obj)) {
		size += rb_id_table_memsize(RCLASS_M_TBL(obj));
	    }
	}
	break;
      case T_STRING:
	size += rb_str_memsize(obj);
	break;
      case T_ARRAY:
	size += rb_ary_memsize(obj);
	break;
      case T_HASH:
	if (RHASH_AR_TABLE_P(obj)) {
	    if (RHASH_AR_TABLE(obj) != NULL) {
		size_t rb_hash_ar_table_size();
		size += rb_hash_ar_table_size();
	    }
	}
	else {
	    VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
	    size += st_memsize(RHASH_ST_TABLE(obj));
	}
	break;
      case T_REGEXP:
	if (RREGEXP_PTR(obj)) {
	    size += onig_memsize(RREGEXP_PTR(obj));
	}
	break;
      case T_DATA:
	if (use_all_types) size += rb_objspace_data_type_memsize(obj);
	break;
      case T_MATCH:
	if (RMATCH(obj)->rmatch) {
	    struct rmatch *rm = RMATCH(obj)->rmatch;
	    size += onig_region_memsize(&rm->regs);
	    size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
	    size += sizeof(struct rmatch);
	}
	break;
      case T_FILE:
	if (RFILE(obj)->fptr) {
	    size += rb_io_memsize(RFILE(obj)->fptr);
	}
	break;
      case T_RATIONAL:
      case T_COMPLEX:
      case T_IMEMO:
	if (imemo_type_p(obj, imemo_tmpbuf)) {
	    size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
	}
	break;

      case T_FLOAT:
      case T_SYMBOL:
	break;

      case T_BIGNUM:
	if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
	    size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
	}
	break;

      case T_NODE:
	UNEXPECTED_NODE(obj_memsize_of);
	break;

      case T_STRUCT:
	if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
	    RSTRUCT(obj)->as.heap.ptr) {
	    size += sizeof(VALUE) * RSTRUCT_LEN(obj);
	}
	break;

      case T_ZOMBIE:
	break;

      default:
	rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
	       BUILTIN_TYPE(obj), (void *)obj);
    }

    return size + sizeof(RVALUE);
}
/*
 * Public wrapper over obj_memsize_of() with use_all_types=TRUE, i.e.
 * T_DATA sizes included.
 */
size_t
rb_obj_memsize_of(VALUE obj)
{
    return obj_memsize_of(obj, TRUE);
}
2012-08-05 14:39:37 +04:00
/* st_foreach callback: resets the value under +key+ in the hash
 * passed via +arg+ to Fixnum 0 (used to zero a caller-supplied
 * result hash in count_objects). */
static int
set_zero(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_hash_aset((VALUE)arg, (VALUE)key, INT2FIX(0));
    return ST_CONTINUE;
}
/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts all objects grouped by type.
 *
 *  It returns a hash, such as:
 *	{
 *	  :TOTAL=>10000,
 *	  :FREE=>3011,
 *	  :T_OBJECT=>6,
 *	  :T_CLASS=>404,
 *	  # ...
 *	}
 *
 *  The contents of the returned hash are implementation specific.
 *  It may be changed in future.
 *
 *  The keys starting with +:T_+ means live objects.
 *  For example, +:T_ARRAY+ is the number of arrays.
 *  +:FREE+ means object slots which is not used now.
 *  +:TOTAL+ means sum of above.
 *
 *  If the optional argument +result_hash+ is given,
 *  it is overwritten and returned. This is intended to avoid probe effect.
 *
 *    h = {}
 *    ObjectSpace.count_objects(h)
 *    puts h
 *    # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
 *
 *  This method is only expected to work on C Ruby.
 *
 */

static VALUE
count_objects(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t counts[T_MASK+1];	/* one bucket per builtin type tag */
    size_t freed = 0;
    size_t total = 0;
    size_t i;
    VALUE hash = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
	hash = argv[0];
	if (!RB_TYPE_P(hash, T_HASH))
	    rb_raise(rb_eTypeError, "non-hash given");
    }

    for (i = 0; i <= T_MASK; i++) {
	counts[i] = 0;
    }

    /* walk every slot of every allocated page; flags == 0 means free */
    for (i = 0; i < heap_allocated_pages; i++) {
	struct heap_page *page = heap_pages_sorted[i];
	RVALUE *p, *pend;

	p = page->start; pend = p + page->total_slots;
	for (;p < pend; p++) {
	    if (p->as.basic.flags) {
		counts[BUILTIN_TYPE(p)]++;
	    }
	    else {
		freed++;
	    }
	}
	total += page->total_slots;
    }

    if (hash == Qnil) {
	hash = rb_hash_new();
    }
    else if (!RHASH_EMPTY_P(hash)) {
	/* zero stale entries so re-used hashes don't keep old counts */
	rb_hash_stlike_foreach(hash, set_zero, hash);
    }
    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
    rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));

    for (i = 0; i <= T_MASK; i++) {
	VALUE type;
	switch (i) {
#define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
	    COUNT_TYPE(T_NONE);
	    COUNT_TYPE(T_OBJECT);
	    COUNT_TYPE(T_CLASS);
	    COUNT_TYPE(T_MODULE);
	    COUNT_TYPE(T_FLOAT);
	    COUNT_TYPE(T_STRING);
	    COUNT_TYPE(T_REGEXP);
	    COUNT_TYPE(T_ARRAY);
	    COUNT_TYPE(T_HASH);
	    COUNT_TYPE(T_STRUCT);
	    COUNT_TYPE(T_BIGNUM);
	    COUNT_TYPE(T_FILE);
	    COUNT_TYPE(T_DATA);
	    COUNT_TYPE(T_MATCH);
	    COUNT_TYPE(T_COMPLEX);
	    COUNT_TYPE(T_RATIONAL);
	    COUNT_TYPE(T_NIL);
	    COUNT_TYPE(T_TRUE);
	    COUNT_TYPE(T_FALSE);
	    COUNT_TYPE(T_SYMBOL);
	    COUNT_TYPE(T_FIXNUM);
	    COUNT_TYPE(T_IMEMO);
	    COUNT_TYPE(T_UNDEF);
	    COUNT_TYPE(T_ICLASS);
	    COUNT_TYPE(T_ZOMBIE);
#undef COUNT_TYPE
	  default:              type = INT2NUM(i); break;
	}
	if (counts[i])
	    rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
    }

    return hash;
}
/*
- - - - - - - - - - - - - - - - - - - - - - - - Garbage Collection - - - - - - - - - - - - - - - - - - - - - - - -
*/
/* Sweeping */
2013-07-18 03:19:38 +04:00
/* Total number of object slots across eden and tomb heaps. */
static size_t
objspace_available_slots(rb_objspace_t *objspace)
{
    return heap_eden->total_slots + heap_tomb->total_slots;
}
2013-10-23 12:48:54 +04:00
/* Number of live objects: allocated minus freed, excluding slots
 * still waiting for finalization. */
static size_t
objspace_live_slots(rb_objspace_t *objspace)
{
    return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
}
2013-10-04 14:05:40 +04:00
/* Number of immediately reusable slots: available minus live minus
 * pending-finalization slots. */
static size_t
objspace_free_slots(rb_objspace_t *objspace)
{
    return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
}
2013-06-07 06:32:57 +04:00
/*
 * Initializes +page+'s mark bitmap before marking starts.  Under
 * RGENGC the uncollectible (old-generation) bitmap is copied in, so
 * those objects begin the phase already marked; without RGENGC the
 * bitmap simply starts empty.
 */
static void
gc_setup_mark_bits(struct heap_page *page)
{
#if USE_RGENGC
    /* copy oldgen bitmap to mark bitmap */
    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
#else
    /* clear mark bitmap */
    memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
#endif
}
2012-11-29 09:05:19 +04:00
2016-03-31 09:51:27 +03:00
static inline int
2013-10-22 14:28:31 +04:00
gc_page_sweep ( rb_objspace_t * objspace , rb_heap_t * heap , struct heap_page * sweep_page )
2012-08-05 14:39:37 +04:00
{
2013-06-07 06:32:57 +04:00
int i ;
2014-09-08 08:11:00 +04:00
int empty_slots = 0 , freed_slots = 0 , final_slots = 0 ;
2013-06-07 06:32:57 +04:00
RVALUE * p , * pend , * offset ;
2013-06-21 03:15:18 +04:00
bits_t * bits , bitset ;
2001-11-13 11:19:52 +03:00
2014-09-08 08:11:00 +04:00
gc_report ( 2 , objspace , " page_sweep: start. \n " ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-09-08 08:11:00 +04:00
sweep_page - > flags . before_sweep = FALSE ;
2013-11-01 17:47:39 +04:00
2014-09-08 08:11:00 +04:00
p = sweep_page - > start ; pend = p + sweep_page - > total_slots ;
2013-10-18 10:33:36 +04:00
offset = p - NUM_IN_PAGE ( p ) ;
2013-11-09 04:23:02 +04:00
bits = sweep_page - > mark_bits ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-06-07 06:32:57 +04:00
/* create guard : fill 1 out-of-range */
bits [ BITMAP_INDEX ( p ) ] | = BITMAP_BIT ( p ) - 1 ;
bits [ BITMAP_INDEX ( pend ) ] | = ~ ( BITMAP_BIT ( pend ) - 1 ) ;
2016-01-09 01:15:40 +03:00
for ( i = 0 ; i < HEAP_PAGE_BITMAP_LIMIT ; i + + ) {
2013-06-07 06:32:57 +04:00
bitset = ~ bits [ i ] ;
2013-06-07 06:52:42 +04:00
if ( bitset ) {
2013-06-21 03:15:18 +04:00
p = offset + i * BITS_BITLENGTH ;
2013-06-07 06:32:57 +04:00
do {
2018-11-06 13:06:07 +03:00
unpoison_object ( ( VALUE ) p , false ) ;
2014-06-05 11:14:53 +04:00
if ( bitset & 1 ) {
switch ( BUILTIN_TYPE ( p ) ) {
default : { /* majority case */
fix SEGV inspecting already freed objects
obj_info() assumes the given object is alive. Passing freed
objects to it results in SEGV.
(lldb) run
Process 29718 launched: './miniruby' (x86_64)
Process 29718 stopped
* thread #1: tid = 0x3082c5, 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
frame #0: 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269
266 }
267 else {
268 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
-> 269 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
270 }
271 }
272
(lldb) bt
* thread #1: tid = 0x3082c5, 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
* frame #0: 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269
frame #1: 0x00000001000c25ff miniruby`rb_iseq_path(iseq=0x000000010af34a20) + 32 at iseq.c:723
frame #2: 0x000000010009db09 miniruby`rb_raw_iseq_info(buff="0x000000010af34a20 [1 ] T_IMEMO iseq", buff_size=256, iseq=0x000000010af34a20) + 69 at gc.c:9274
frame #3: 0x000000010009e45a miniruby`rb_raw_obj_info(buff="0x000000010af34a20 [1 ] T_IMEMO iseq", buff_size=256, obj=4478683680) + 2191 at gc.c:9397
frame #4: 0x000000010009e4d5 miniruby`obj_info(obj=4478683680) + 98 at gc.c:9429
frame #5: 0x0000000100091ae3 miniruby`gc_page_sweep(objspace=0x00000001007d3280, heap=0x00000001007d32a0, sweep_page=0x000000010ae07bc0) + 622 at gc.c:3529
frame #6: 0x000000010009206a miniruby`gc_sweep_step(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 188 at gc.c:3705
frame #7: 0x0000000100092254 miniruby`gc_sweep_continue(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 133 at gc.c:3772
frame #8: 0x000000010008d7f9 miniruby`heap_prepare(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 48 at gc.c:1746
frame #9: 0x000000010008d8a1 miniruby`heap_get_freeobj_from_next_freepage(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 37 at gc.c:1769
frame #10: 0x000000010008d98d miniruby`heap_get_freeobj(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 83 at gc.c:1803
frame #11: 0x000000010008dcb0 miniruby`newobj_slowpath(klass=4334386280, flags=5, v1=0, v2=0, v3=0, objspace=0x00000001007d3280, wb_protected=1) + 220 at gc.c:1930
frame #12: 0x000000010008dd6c miniruby`newobj_slowpath_wb_protected(klass=4334386280, flags=5, v1=0, v2=0, v3=0, objspace=0x00000001007d3280) + 76 at gc.c:1942
frame #13: 0x000000010008dea1 miniruby`newobj_of(klass=4334386280, flags=5, v1=0, v2=0, v3=0, wb_protected=1) + 221 at gc.c:1974
frame #14: 0x000000010008df39 miniruby`rb_wb_protected_newobj_of(klass=4334386280, flags=5) + 54 at gc.c:1990
frame #15: 0x0000000100195f7c miniruby`str_alloc(klass=4334386280) + 29 at string.c:692
frame #16: 0x0000000100195fe9 miniruby`str_new0(klass=4334386280, ptr="gitm", len=4, termlen=1) + 73 at string.c:714
frame #17: 0x000000010019633e miniruby`rb_enc_str_new(ptr="gitm", len=4, enc=0x00000001025d50a0) + 81 at string.c:766
frame #18: 0x000000010010a80a miniruby`parser_str_new(p="gitm", n=4, enc=0x00000001025d50a0, func=66, enc0=0x00000001025d50a0) + 50 at parse.y:5817
frame #19: 0x000000010010ce1a miniruby`parser_parse_string(parser=0x00000001042ac5c0, quote=0x000000010460c028) + 795 at parse.y:6675
frame #20: 0x00000001001120bd miniruby`parser_yylex(parser=0x00000001042ac5c0) + 159 at parse.y:8281
frame #21: 0x0000000100115068 miniruby`yylex(lval=0x00007fff5fbf9948, yylloc=0x00007fff5fbf9ab0, parser=0x00000001042ac5c0) + 55 at parse.y:8931
frame #22: 0x00000001000fc79f miniruby`ruby_yyparse(parser=0x00000001042ac5c0) + 1198 at parse.c:5798
frame #23: 0x0000000100109f5a miniruby`yycompile0(arg=4364879296) + 317 at parse.y:5595
frame #24: 0x0000000100214ef0 miniruby`rb_suppress_tracing(func=(miniruby`yycompile0 at parse.y:5565), arg=4364879296) + 349 at vm_trace.c:397
frame #25: 0x000000010010a1df miniruby`yycompile(parser=0x00000001042ac5c0, fname=4443743440, line=1) + 126 at parse.y:5637
frame #26: 0x000000010010a4c1 miniruby`parser_compile_string(vparser=4443743480, fname=4443743440, s=4443743520, line=1) + 191 at parse.y:5706
frame #27: 0x000000010010a5b7 miniruby`rb_parser_compile_string_path(vparser=4443743480, f=4443743440, s=4443743520, line=1) + 58 at parse.y:5730
frame #28: 0x0000000100206025 miniruby`eval_make_iseq(src=4443743520, fname=4443743440, line=1, bind=0x0000000000000000, base_block=0x00007fff5fbfb370) + 266 at vm_eval.c:1274
frame #29: 0x0000000100206153 miniruby`eval_string_with_cref(self=4334412520, src=4443743520, cref=0x0000000000000000, file=52, line=1) + 197 at vm_eval.c:1307
frame #30: 0x0000000100206389 miniruby`rb_f_eval(argc=1, argv=0x0000000102400eb8, self=4334412520) + 219 at vm_eval.c:1382
frame #31: 0x00000001001f247c miniruby`call_cfunc_m1(func=(miniruby`rb_f_eval at vm_eval.c:1364), recv=4334412520, argc=1, argv=0x0000000102400eb8) + 47 at vm_insnhelper.c:1723
frame #32: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500d80, calling=0x00007fff5fbfbf50, ci=0x000000010263f240, cc=0x0000000100749b50) + 386 at vm_insnhelper.c:1918
frame #33: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500d80, calling=0x00007fff5fbfbf50, ci=0x000000010263f240, cc=0x0000000100749b50) + 149 at vm_insnhelper.c:1934
frame #34: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #35: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #36: 0x00000001002093f8 miniruby`invoke_block(ec=0x00000001007d3548, iseq=0x000000010252d7f0, self=4334412520, captured=0x0000000102500df8, cref=0x0000000000000000, type=572653569, opt_pc=0) + 224 at vm.c:988
frame #37: 0x0000000100209766 miniruby`invoke_iseq_block_from_c(ec=0x00000001007d3548, captured=0x0000000102500df8, self=4334412520, argc=0, argv=0x0000000000000000, passed_block_handler=0, cref=0x0000000000000000, is_lambda=0) + 389 at vm.c:1040
frame #38: 0x0000000100209824 miniruby`invoke_block_from_c_bh(ec=0x00000001007d3548, block_handler=4333768185, argc=0, argv=0x0000000000000000, passed_block_handler=0, cref=0x0000000000000000, is_lambda=0, force_blockarg=0) + 138 at vm.c:1058
frame #39: 0x00000001002099d0 miniruby`vm_yield(ec=0x00000001007d3548, argc=0, argv=0x0000000000000000) + 69 at vm.c:1103
frame #40: 0x0000000100205623 miniruby`rb_yield_0(argc=0, argv=0x0000000000000000) + 40 at vm_eval.c:970
frame #41: 0x0000000100205964 miniruby`loop_i + 19 at vm_eval.c:1049
frame #42: 0x000000010007db07 miniruby`rb_rescue2(b_proc=(miniruby`loop_i at vm_eval.c:1047), data1=0, r_proc=(miniruby`loop_stop at vm_eval.c:1056), data2=0) + 369 at eval.c:896
frame #43: 0x0000000100205a2e miniruby`rb_f_loop(self=4334412520) + 121 at vm_eval.c:1100
frame #44: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`rb_f_loop at vm_eval.c:1098), recv=4334412520, argc=0, argv=0x0000000102400e80) + 41 at vm_insnhelper.c:1729
frame #45: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 386 at vm_insnhelper.c:1918
frame #46: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 149 at vm_insnhelper.c:1934
frame #47: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 239 at vm_insnhelper.c:2232
frame #48: 0x00000001001f4a2c miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 253 at vm_insnhelper.c:2366
frame #49: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 59 at vm_insnhelper.c:2398
frame #50: 0x00000001001fab2f miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 7480 at insns.def:850
frame #51: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #52: 0x000000010020c40f miniruby`rb_iseq_eval_main(iseq=0x000000010252dd90) + 52 at vm.c:2019
frame #53: 0x000000010007c768 miniruby`ruby_exec_internal(n=0x000000010252dd90) + 297 at eval.c:246
frame #54: 0x000000010007c88e miniruby`ruby_exec_node(n=0x000000010252dd90) + 36 at eval.c:310
frame #55: 0x000000010007c861 miniruby`ruby_run_node(n=0x000000010252dd90) + 62 at eval.c:302
frame #56: 0x000000010000138d miniruby`main(argc=2, argv=0x00007fff5fbfdbf0) + 113 at main.c:42
frame #57: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb) p ((struct RVALUE*)pathobj)->as.basic
(RBasic) $0 = (flags = 0, klass = 4478683600)
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61568 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:04 +03:00
gc_report ( 2 , objspace , " page_sweep: free %p \n " , ( void * ) p ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC && RGENGC_CHECK_MODE
2014-09-08 08:11:00 +04:00
if ( ! is_full_marking ( objspace ) ) {
fix SEGV inspecting already freed objects
obj_info() assumes the given object is alive. Passing freed
objects to it results in SEGV.
(lldb) run
Process 29718 launched: './miniruby' (x86_64)
Process 29718 stopped
* thread #1: tid = 0x3082c5, 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
frame #0: 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269
266 }
267 else {
268 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
-> 269 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
270 }
271 }
272
(lldb) bt
* thread #1: tid = 0x3082c5, 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
* frame #0: 0x00000001000bfaab miniruby`pathobj_path(pathobj=4478683640) + 70 at vm_core.h:269
frame #1: 0x00000001000c25ff miniruby`rb_iseq_path(iseq=0x000000010af34a20) + 32 at iseq.c:723
frame #2: 0x000000010009db09 miniruby`rb_raw_iseq_info(buff="0x000000010af34a20 [1 ] T_IMEMO iseq", buff_size=256, iseq=0x000000010af34a20) + 69 at gc.c:9274
frame #3: 0x000000010009e45a miniruby`rb_raw_obj_info(buff="0x000000010af34a20 [1 ] T_IMEMO iseq", buff_size=256, obj=4478683680) + 2191 at gc.c:9397
frame #4: 0x000000010009e4d5 miniruby`obj_info(obj=4478683680) + 98 at gc.c:9429
frame #5: 0x0000000100091ae3 miniruby`gc_page_sweep(objspace=0x00000001007d3280, heap=0x00000001007d32a0, sweep_page=0x000000010ae07bc0) + 622 at gc.c:3529
frame #6: 0x000000010009206a miniruby`gc_sweep_step(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 188 at gc.c:3705
frame #7: 0x0000000100092254 miniruby`gc_sweep_continue(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 133 at gc.c:3772
frame #8: 0x000000010008d7f9 miniruby`heap_prepare(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 48 at gc.c:1746
frame #9: 0x000000010008d8a1 miniruby`heap_get_freeobj_from_next_freepage(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 37 at gc.c:1769
frame #10: 0x000000010008d98d miniruby`heap_get_freeobj(objspace=0x00000001007d3280, heap=0x00000001007d32a0) + 83 at gc.c:1803
frame #11: 0x000000010008dcb0 miniruby`newobj_slowpath(klass=4334386280, flags=5, v1=0, v2=0, v3=0, objspace=0x00000001007d3280, wb_protected=1) + 220 at gc.c:1930
frame #12: 0x000000010008dd6c miniruby`newobj_slowpath_wb_protected(klass=4334386280, flags=5, v1=0, v2=0, v3=0, objspace=0x00000001007d3280) + 76 at gc.c:1942
frame #13: 0x000000010008dea1 miniruby`newobj_of(klass=4334386280, flags=5, v1=0, v2=0, v3=0, wb_protected=1) + 221 at gc.c:1974
frame #14: 0x000000010008df39 miniruby`rb_wb_protected_newobj_of(klass=4334386280, flags=5) + 54 at gc.c:1990
frame #15: 0x0000000100195f7c miniruby`str_alloc(klass=4334386280) + 29 at string.c:692
frame #16: 0x0000000100195fe9 miniruby`str_new0(klass=4334386280, ptr="gitm", len=4, termlen=1) + 73 at string.c:714
frame #17: 0x000000010019633e miniruby`rb_enc_str_new(ptr="gitm", len=4, enc=0x00000001025d50a0) + 81 at string.c:766
frame #18: 0x000000010010a80a miniruby`parser_str_new(p="gitm", n=4, enc=0x00000001025d50a0, func=66, enc0=0x00000001025d50a0) + 50 at parse.y:5817
frame #19: 0x000000010010ce1a miniruby`parser_parse_string(parser=0x00000001042ac5c0, quote=0x000000010460c028) + 795 at parse.y:6675
frame #20: 0x00000001001120bd miniruby`parser_yylex(parser=0x00000001042ac5c0) + 159 at parse.y:8281
frame #21: 0x0000000100115068 miniruby`yylex(lval=0x00007fff5fbf9948, yylloc=0x00007fff5fbf9ab0, parser=0x00000001042ac5c0) + 55 at parse.y:8931
frame #22: 0x00000001000fc79f miniruby`ruby_yyparse(parser=0x00000001042ac5c0) + 1198 at parse.c:5798
frame #23: 0x0000000100109f5a miniruby`yycompile0(arg=4364879296) + 317 at parse.y:5595
frame #24: 0x0000000100214ef0 miniruby`rb_suppress_tracing(func=(miniruby`yycompile0 at parse.y:5565), arg=4364879296) + 349 at vm_trace.c:397
frame #25: 0x000000010010a1df miniruby`yycompile(parser=0x00000001042ac5c0, fname=4443743440, line=1) + 126 at parse.y:5637
frame #26: 0x000000010010a4c1 miniruby`parser_compile_string(vparser=4443743480, fname=4443743440, s=4443743520, line=1) + 191 at parse.y:5706
frame #27: 0x000000010010a5b7 miniruby`rb_parser_compile_string_path(vparser=4443743480, f=4443743440, s=4443743520, line=1) + 58 at parse.y:5730
frame #28: 0x0000000100206025 miniruby`eval_make_iseq(src=4443743520, fname=4443743440, line=1, bind=0x0000000000000000, base_block=0x00007fff5fbfb370) + 266 at vm_eval.c:1274
frame #29: 0x0000000100206153 miniruby`eval_string_with_cref(self=4334412520, src=4443743520, cref=0x0000000000000000, file=52, line=1) + 197 at vm_eval.c:1307
frame #30: 0x0000000100206389 miniruby`rb_f_eval(argc=1, argv=0x0000000102400eb8, self=4334412520) + 219 at vm_eval.c:1382
frame #31: 0x00000001001f247c miniruby`call_cfunc_m1(func=(miniruby`rb_f_eval at vm_eval.c:1364), recv=4334412520, argc=1, argv=0x0000000102400eb8) + 47 at vm_insnhelper.c:1723
frame #32: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500d80, calling=0x00007fff5fbfbf50, ci=0x000000010263f240, cc=0x0000000100749b50) + 386 at vm_insnhelper.c:1918
frame #33: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500d80, calling=0x00007fff5fbfbf50, ci=0x000000010263f240, cc=0x0000000100749b50) + 149 at vm_insnhelper.c:1934
frame #34: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #35: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #36: 0x00000001002093f8 miniruby`invoke_block(ec=0x00000001007d3548, iseq=0x000000010252d7f0, self=4334412520, captured=0x0000000102500df8, cref=0x0000000000000000, type=572653569, opt_pc=0) + 224 at vm.c:988
frame #37: 0x0000000100209766 miniruby`invoke_iseq_block_from_c(ec=0x00000001007d3548, captured=0x0000000102500df8, self=4334412520, argc=0, argv=0x0000000000000000, passed_block_handler=0, cref=0x0000000000000000, is_lambda=0) + 389 at vm.c:1040
frame #38: 0x0000000100209824 miniruby`invoke_block_from_c_bh(ec=0x00000001007d3548, block_handler=4333768185, argc=0, argv=0x0000000000000000, passed_block_handler=0, cref=0x0000000000000000, is_lambda=0, force_blockarg=0) + 138 at vm.c:1058
frame #39: 0x00000001002099d0 miniruby`vm_yield(ec=0x00000001007d3548, argc=0, argv=0x0000000000000000) + 69 at vm.c:1103
frame #40: 0x0000000100205623 miniruby`rb_yield_0(argc=0, argv=0x0000000000000000) + 40 at vm_eval.c:970
frame #41: 0x0000000100205964 miniruby`loop_i + 19 at vm_eval.c:1049
frame #42: 0x000000010007db07 miniruby`rb_rescue2(b_proc=(miniruby`loop_i at vm_eval.c:1047), data1=0, r_proc=(miniruby`loop_stop at vm_eval.c:1056), data2=0) + 369 at eval.c:896
frame #43: 0x0000000100205a2e miniruby`rb_f_loop(self=4334412520) + 121 at vm_eval.c:1100
frame #44: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`rb_f_loop at vm_eval.c:1098), recv=4334412520, argc=0, argv=0x0000000102400e80) + 41 at vm_insnhelper.c:1729
frame #45: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 386 at vm_insnhelper.c:1918
frame #46: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 149 at vm_insnhelper.c:1934
frame #47: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 239 at vm_insnhelper.c:2232
frame #48: 0x00000001001f4a2c miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 253 at vm_insnhelper.c:2366
frame #49: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x0000000102500de0, calling=0x00007fff5fbfd4d0, ci=0x000000010263bbf0, cc=0x0000000102642118) + 59 at vm_insnhelper.c:2398
frame #50: 0x00000001001fab2f miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 7480 at insns.def:850
frame #51: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #52: 0x000000010020c40f miniruby`rb_iseq_eval_main(iseq=0x000000010252dd90) + 52 at vm.c:2019
frame #53: 0x000000010007c768 miniruby`ruby_exec_internal(n=0x000000010252dd90) + 297 at eval.c:246
frame #54: 0x000000010007c88e miniruby`ruby_exec_node(n=0x000000010252dd90) + 36 at eval.c:310
frame #55: 0x000000010007c861 miniruby`ruby_run_node(n=0x000000010252dd90) + 62 at eval.c:302
frame #56: 0x000000010000138d miniruby`main(argc=2, argv=0x00007fff5fbfdbf0) + 113 at main.c:42
frame #57: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb) p ((struct RVALUE*)pathobj)->as.basic
(RBasic) $0 = (flags = 0, klass = 4478683600)
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61568 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:04 +03:00
if ( RVALUE_OLD_P ( ( VALUE ) p ) ) rb_bug ( " page_sweep: %p - old while minor GC. " , ( void * ) p ) ;
if ( rgengc_remembered ( objspace , ( VALUE ) p ) ) rb_bug ( " page_sweep: %p - remembered. " , ( void * ) p ) ;
2014-09-08 08:11:00 +04:00
}
2014-06-05 11:14:53 +04:00
# endif
if ( obj_free ( objspace , ( VALUE ) p ) ) {
final_slots + + ;
}
else {
( void ) VALGRIND_MAKE_MEM_UNDEFINED ( ( void * ) p , sizeof ( RVALUE ) ) ;
heap_page_add_freeobj ( objspace , sweep_page , ( VALUE ) p ) ;
2014-09-08 08:11:00 +04:00
gc_report ( 3 , objspace , " page_sweep: %s is added to freelist \n " , obj_info ( ( VALUE ) p ) ) ;
2014-06-05 11:14:53 +04:00
freed_slots + + ;
2018-11-06 13:06:07 +03:00
poison_object ( ( VALUE ) p ) ;
2014-06-05 11:14:53 +04:00
}
break ;
}
2014-09-08 08:11:00 +04:00
/* minor cases */
2014-06-05 11:14:53 +04:00
case T_ZOMBIE :
/* already counted */
break ;
case T_NONE :
empty_slots + + ; /* already freed */
break ;
2013-06-07 06:32:57 +04:00
}
}
p + + ;
bitset > > = 1 ;
} while ( bitset ) ;
}
2012-08-05 14:39:37 +04:00
}
2013-06-21 04:24:14 +04:00
2013-10-18 10:33:36 +04:00
gc_setup_mark_bits ( sweep_page ) ;
2013-06-07 06:32:57 +04:00
2013-06-21 04:24:14 +04:00
# if GC_PROFILE_MORE_DETAIL
2013-12-18 11:58:04 +04:00
if ( gc_prof_enabled ( objspace ) ) {
2013-06-21 04:24:14 +04:00
gc_profile_record * record = gc_prof_record ( objspace ) ;
2013-11-24 23:18:53 +04:00
record - > removing_objects + = final_slots + freed_slots ;
record - > empty_objects + = empty_slots ;
2013-06-21 04:24:14 +04:00
}
# endif
2014-09-08 08:11:00 +04:00
if ( 0 ) fprintf ( stderr , " gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d \n " ,
( int ) rb_gc_count ( ) ,
( int ) sweep_page - > total_slots ,
freed_slots , empty_slots , final_slots ) ;
2013-06-21 04:24:14 +04:00
2016-03-31 11:21:35 +03:00
sweep_page - > free_slots = freed_slots + empty_slots ;
2014-09-09 14:01:18 +04:00
objspace - > profile . total_freed_objects + = freed_slots ;
2013-11-24 23:18:53 +04:00
heap_pages_final_slots + = final_slots ;
2014-06-05 11:14:53 +04:00
sweep_page - > final_slots + = final_slots ;
1999-01-20 07:59:39 +03:00
2013-10-22 14:28:31 +04:00
if ( heap_pages_deferred_final & & ! finalizing ) {
2012-08-05 14:39:37 +04:00
rb_thread_t * th = GET_THREAD ( ) ;
if ( th ) {
2016-03-18 10:37:07 +03:00
gc_finalize_deferred_register ( objspace ) ;
2012-08-05 14:39:37 +04:00
}
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-09-08 08:11:00 +04:00
gc_report ( 2 , objspace , " page_sweep: end. \n " ) ;
2016-03-31 09:51:27 +03:00
return freed_slots + empty_slots ;
2013-05-13 20:34:25 +04:00
}
2013-10-22 14:28:31 +04:00
/* allocate additional minimum page to work */
/*
 * Ensure the heap has at least one free page after sweeping.  If the
 * sweep yielded none and the normal increment path cannot grow the
 * heap, force a one-page increment; if even that fails, we are truly
 * out of memory.
 */
static void
gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
{
    if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
	/* there is no free after page_sweep() */
	heap_set_increment(objspace, 1);
	if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
	    rb_memerror();
	}
    }
}
2016-03-04 12:53:03 +03:00
/*
 * Human-readable name of a GC mode, for debug output.
 * Aborts via rb_bug() on an unknown value.
 */
static const char *
gc_mode_name(enum gc_mode mode)
{
    switch (mode) {
      case gc_mode_none: return "none";
      case gc_mode_marking: return "marking";
      case gc_mode_sweeping: return "sweeping";
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
    }
}
2014-09-09 07:13:25 +04:00
static void
2016-03-04 12:53:03 +03:00
gc_mode_transition ( rb_objspace_t * objspace , enum gc_mode mode )
2014-09-09 07:13:25 +04:00
{
2014-09-09 10:39:08 +04:00
# if RGENGC_CHECK_MODE
2016-03-04 12:53:03 +03:00
enum gc_mode prev_mode = gc_mode ( objspace ) ;
switch ( prev_mode ) {
2017-06-22 08:03:18 +03:00
case gc_mode_none : GC_ASSERT ( mode = = gc_mode_marking ) ; break ;
case gc_mode_marking : GC_ASSERT ( mode = = gc_mode_sweeping ) ; break ;
case gc_mode_sweeping : GC_ASSERT ( mode = = gc_mode_none ) ; break ;
2014-09-09 07:13:25 +04:00
}
# endif
2016-03-04 12:53:03 +03:00
if ( 0 ) fprintf ( stderr , " gc_mode_transition: %s->%s \n " , gc_mode_name ( gc_mode ( objspace ) ) , gc_mode_name ( mode ) ) ;
gc_mode_set ( objspace , mode ) ;
2014-09-09 07:13:25 +04:00
}
2013-10-22 14:28:31 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_sweep_start_heap ( rb_objspace_t * objspace , rb_heap_t * heap )
2013-10-22 14:28:31 +04:00
{
2018-05-16 23:39:30 +03:00
heap - > sweeping_page = list_top ( & heap - > pages , struct heap_page , page_node ) ;
2013-10-22 14:28:31 +04:00
heap - > free_pages = NULL ;
2014-11-13 23:16:59 +03:00
# if GC_ENABLE_INCREMENTAL_MARK
2014-09-08 08:11:00 +04:00
heap - > pooled_pages = NULL ;
2014-11-13 23:16:59 +03:00
objspace - > rincgc . pooled_slots = 0 ;
# endif
2013-10-22 14:28:31 +04:00
if ( heap - > using_page ) {
2019-04-02 22:13:07 +03:00
struct heap_page * page = heap - > using_page ;
unpoison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) , false ) ;
RVALUE * * p = & page - > freelist ;
2013-10-26 01:37:39 +04:00
while ( * p ) {
p = & ( * p ) - > as . free . next ;
2013-10-25 23:09:14 +04:00
}
2013-10-26 01:37:39 +04:00
* p = heap - > freelist ;
2019-04-02 22:13:07 +03:00
poison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) ) ;
2013-10-22 14:28:31 +04:00
heap - > using_page = NULL ;
}
heap - > freelist = NULL ;
}
2013-06-05 10:27:43 +04:00
# if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
2013-06-07 08:25:20 +04:00
__attribute__ ( ( noinline ) )
2013-06-05 10:27:43 +04:00
# endif
2012-08-05 14:39:37 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_sweep_start ( rb_objspace_t * objspace )
2012-08-05 14:39:37 +04:00
{
2016-03-04 12:53:03 +03:00
gc_mode_transition ( objspace , gc_mode_sweeping ) ;
2016-03-31 11:21:35 +03:00
gc_sweep_start_heap ( objspace , heap_eden ) ;
2012-08-05 14:39:37 +04:00
}
static void
2014-09-08 08:11:00 +04:00
gc_sweep_finish ( rb_objspace_t * objspace )
2012-08-05 14:39:37 +04:00
{
2017-06-20 11:23:25 +03:00
gc_report ( 1 , objspace , " gc_sweep_finish \n " ) ;
2013-05-22 03:09:22 +04:00
2013-06-24 02:58:01 +04:00
gc_prof_set_heap_info ( objspace ) ;
2013-10-23 12:48:54 +04:00
heap_pages_free_unused_pages ( objspace ) ;
2013-10-23 14:16:01 +04:00
/* if heap_pages has unused pages, then assign them to increment */
2016-01-08 13:34:14 +03:00
if ( heap_allocatable_pages < heap_tomb - > total_pages ) {
2017-06-22 09:42:26 +03:00
heap_allocatable_pages_set ( objspace , heap_tomb - > total_pages ) ;
2013-10-23 12:48:54 +04:00
}
2013-06-22 02:29:09 +04:00
2014-09-08 08:11:00 +04:00
gc_event_hook ( objspace , RUBY_INTERNAL_EVENT_GC_END_SWEEP , 0 ) ;
2016-03-04 12:53:03 +03:00
gc_mode_transition ( objspace , gc_mode_none ) ;
2013-11-04 22:59:33 +04:00
2014-06-03 11:50:23 +04:00
# if RGENGC_CHECK_MODE >= 2
2014-06-04 17:33:20 +04:00
gc_verify_internal_consistency ( Qnil ) ;
2014-06-03 11:50:23 +04:00
# endif
2012-08-05 14:39:37 +04:00
}
static int
2014-09-08 08:11:00 +04:00
gc_sweep_step ( rb_objspace_t * objspace , rb_heap_t * heap )
2012-08-05 14:39:37 +04:00
{
2018-05-16 23:39:30 +03:00
struct heap_page * sweep_page = heap - > sweeping_page ;
2014-09-08 08:11:00 +04:00
int unlink_limit = 3 ;
2014-11-14 04:44:57 +03:00
# if GC_ENABLE_INCREMENTAL_MARK
int need_pool = will_be_incremental_marking ( objspace ) ? TRUE : FALSE ;
2014-09-08 08:11:00 +04:00
gc_report ( 2 , objspace , " gc_sweep_step (need_pool: %d) \n " , need_pool ) ;
2014-11-14 04:44:57 +03:00
# else
gc_report ( 2 , objspace , " gc_sweep_step \n " ) ;
# endif
2013-06-20 00:43:33 +04:00
2014-09-08 08:11:00 +04:00
if ( sweep_page = = NULL ) return FALSE ;
2012-08-05 14:39:37 +04:00
2013-10-26 08:31:10 +04:00
# if GC_ENABLE_LAZY_SWEEP
2013-10-22 14:28:31 +04:00
gc_prof_sweep_timer_start ( objspace ) ;
2013-10-26 08:31:10 +04:00
# endif
2013-07-16 11:33:48 +04:00
2018-05-16 23:39:30 +03:00
do {
2016-03-31 09:51:27 +03:00
int free_slots = gc_page_sweep ( objspace , heap , sweep_page ) ;
2018-05-16 23:39:30 +03:00
heap - > sweeping_page = list_next ( & heap - > pages , sweep_page , page_node ) ;
2013-07-16 11:33:48 +04:00
2016-03-31 09:51:27 +03:00
if ( sweep_page - > final_slots + free_slots = = sweep_page - > total_slots & &
2016-03-31 11:21:35 +03:00
heap_pages_freeable_pages > 0 & &
2014-09-08 08:11:00 +04:00
unlink_limit > 0 ) {
2016-03-31 11:21:35 +03:00
heap_pages_freeable_pages - - ;
2014-09-08 08:11:00 +04:00
unlink_limit - - ;
/* there are no living objects -> move this page to tomb heap */
heap_unlink_page ( objspace , heap , sweep_page ) ;
heap_add_page ( objspace , heap_tomb , sweep_page ) ;
}
2016-03-31 09:51:27 +03:00
else if ( free_slots > 0 ) {
2014-11-14 04:44:57 +03:00
# if GC_ENABLE_INCREMENTAL_MARK
2014-09-08 08:11:00 +04:00
if ( need_pool ) {
2014-11-13 23:16:59 +03:00
if ( heap_add_poolpage ( objspace , heap , sweep_page ) ) {
need_pool = FALSE ;
}
2014-09-08 08:11:00 +04:00
}
else {
heap_add_freepage ( objspace , heap , sweep_page ) ;
break ;
}
2014-11-14 04:44:57 +03:00
# else
heap_add_freepage ( objspace , heap , sweep_page ) ;
break ;
# endif
2014-09-08 08:11:00 +04:00
}
else {
sweep_page - > free_next = NULL ;
2014-07-02 10:47:10 +04:00
}
2018-05-16 23:39:30 +03:00
} while ( ( sweep_page = heap - > sweeping_page ) ) ;
2013-07-16 11:33:48 +04:00
2018-05-16 23:39:30 +03:00
if ( ! heap - > sweeping_page ) {
2014-09-08 08:11:00 +04:00
gc_sweep_finish ( objspace ) ;
2014-07-02 10:47:10 +04:00
}
2013-10-26 08:31:10 +04:00
# if GC_ENABLE_LAZY_SWEEP
2013-06-20 00:43:33 +04:00
gc_prof_sweep_timer_stop ( objspace ) ;
2013-10-26 08:31:10 +04:00
# endif
2013-06-20 00:43:33 +04:00
2014-07-02 10:47:10 +04:00
return heap - > free_pages ! = NULL ;
2012-08-05 14:39:37 +04:00
}
static void
2014-09-08 08:11:00 +04:00
gc_sweep_rest ( rb_objspace_t * objspace )
2012-08-05 14:39:37 +04:00
{
2014-09-08 08:11:00 +04:00
rb_heap_t * heap = heap_eden ; /* lazy sweep only for eden */
2014-10-27 07:40:13 +03:00
while ( has_sweeping_pages ( heap ) ) {
gc_sweep_step ( objspace , heap ) ;
2012-08-05 14:39:37 +04:00
}
}
2013-10-22 14:28:31 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_sweep_continue ( rb_objspace_t * objspace , rb_heap_t * heap )
2013-10-22 14:28:31 +04:00
{
2017-06-22 08:03:18 +03:00
GC_ASSERT ( dont_gc = = FALSE ) ;
2018-06-05 22:53:09 +03:00
if ( ! GC_ENABLE_LAZY_SWEEP ) return ;
2014-09-08 08:11:00 +04:00
gc_enter ( objspace , " sweep_continue " ) ;
# if USE_RGENGC
if ( objspace - > rgengc . need_major_gc = = GPR_FLAG_NONE & & heap_increment ( objspace , heap ) ) {
gc_report ( 3 , objspace , " gc_sweep_continue: success heap_increment(). \n " ) ;
}
# endif
gc_sweep_step ( objspace , heap ) ;
gc_exit ( objspace , " sweep_continue " ) ;
2013-10-22 14:28:31 +04:00
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_sweep ( rb_objspace_t * objspace )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
2014-09-09 06:45:21 +04:00
const unsigned int immediate_sweep = objspace - > flags . immediate_sweep ;
2014-09-08 08:11:00 +04:00
gc_report ( 1 , objspace , " gc_sweep: immediate: %d \n " , immediate_sweep ) ;
2013-05-22 03:09:22 +04:00
if ( immediate_sweep ) {
2013-10-26 08:31:10 +04:00
# if !GC_ENABLE_LAZY_SWEEP
2013-06-21 10:29:30 +04:00
gc_prof_sweep_timer_start ( objspace ) ;
2013-10-26 08:31:10 +04:00
# endif
2014-09-08 08:11:00 +04:00
gc_sweep_start ( objspace ) ;
gc_sweep_rest ( objspace ) ;
2013-10-26 08:31:10 +04:00
# if !GC_ENABLE_LAZY_SWEEP
2013-06-21 10:29:30 +04:00
gc_prof_sweep_timer_stop ( objspace ) ;
2013-10-26 08:31:10 +04:00
# endif
2013-05-22 03:09:22 +04:00
}
else {
2018-05-20 21:09:36 +03:00
struct heap_page * page = NULL ;
2014-09-08 08:11:00 +04:00
gc_sweep_start ( objspace ) ;
2018-05-16 23:39:30 +03:00
list_for_each ( & heap_eden - > pages , page , page_node ) {
page - > flags . before_sweep = TRUE ;
}
2014-09-08 08:11:00 +04:00
gc_sweep_step ( objspace , heap_eden ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
2014-09-08 08:11:00 +04:00
gc_heap_prepare_minimum_pages ( objspace , heap_eden ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
2012-08-05 14:39:37 +04:00
2013-07-18 03:19:38 +04:00
/* Marking - Marking stack */
2012-10-03 16:30:21 +04:00
/* Allocate one mark-stack chunk; raises rb_memerror() on OOM. */
static stack_chunk_t *
stack_chunk_alloc(void)
{
    stack_chunk_t *chunk = malloc(sizeof(stack_chunk_t));
    if (chunk == NULL) {
        rb_memerror();
    }
    return chunk;
}
static inline int
2013-11-23 19:35:04 +04:00
is_mark_stack_empty ( mark_stack_t * stack )
2012-10-03 16:30:21 +04:00
{
return stack - > chunk = = NULL ;
}
2014-09-08 08:11:00 +04:00
/* Number of entries currently on the mark stack: the partially filled
 * top chunk contributes stack->index, every full chunk below it
 * contributes stack->limit. */
static size_t
mark_stack_size(mark_stack_t *stack)
{
    size_t total = stack->index;
    stack_chunk_t *chunk;

    for (chunk = stack->chunk ? stack->chunk->next : NULL;
         chunk != NULL;
         chunk = chunk->next) {
        total += stack->limit;
    }
    return total;
}
2012-10-03 16:30:21 +04:00
/* Push CHUNK onto the stack's chunk cache for later reuse. */
static void
add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
{
    chunk->next = stack->cache;
    stack->cache = chunk;
    stack->cache_size++;
}
/* Free at most one cached chunk when more than half of the cache went
 * unused since the last call, then reset the unused watermark. */
static void
shrink_stack_chunk_cache(mark_stack_t *stack)
{
    if (stack->unused_cache_size > (stack->cache_size / 2)) {
        stack_chunk_t *victim = stack->cache;
        stack->cache = victim->next;
        stack->cache_size--;
        free(victim);
    }
    stack->unused_cache_size = stack->cache_size;
}
/* Grow the mark stack by one chunk, preferring a cached chunk over a
 * fresh allocation.  Caller must have filled the current chunk. */
static void
push_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *next;

    GC_ASSERT(stack->index == stack->limit);

    if (stack->cache_size > 0) {
        /* reuse from the cache */
        next = stack->cache;
        stack->cache = next->next;
        stack->cache_size--;
        if (stack->unused_cache_size > stack->cache_size)
            stack->unused_cache_size = stack->cache_size;
    }
    else {
        next = stack_chunk_alloc();
    }
    next->next = stack->chunk;
    stack->chunk = next;
    stack->index = 0;
}
/* Drop the (now empty) top chunk into the cache and make the chunk
 * below it the new, full top. */
static void
pop_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *below = stack->chunk->next;

    GC_ASSERT(stack->index == 0);

    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = below;
    stack->index = stack->limit;
}
/* Release every chunk of the mark stack (cache chunks are untouched). */
static void
free_stack_chunks(mark_stack_t *stack)
{
    stack_chunk_t *chunk = stack->chunk;

    while (chunk != NULL) {
        stack_chunk_t *next = chunk->next;
        free(chunk);
        chunk = next;
    }
}
/* Push DATA onto the mark stack, growing by a chunk when full. */
static void
push_mark_stack(mark_stack_t *stack, VALUE data)
{
    if (stack->index == stack->limit) {
        push_mark_stack_chunk(stack);
    }
    stack->chunk->data[stack->index++] = data;
}
/* Pop the top entry into *DATA.  Returns FALSE when the stack is empty.
 * An emptied top chunk is recycled into the cache. */
static int
pop_mark_stack(mark_stack_t *stack, VALUE *data)
{
    if (is_mark_stack_empty(stack)) {
        return FALSE;
    }

    *data = stack->chunk->data[--stack->index];
    if (stack->index == 0) {
        /* equivalent to the old pre-decrement check (index == 1) */
        pop_mark_stack_chunk(stack);
    }
    return TRUE;
}
2014-09-08 08:11:00 +04:00
# if GC_ENABLE_INCREMENTAL_MARK
/* Replace the first occurrence of OBJ in CHUNK's first LIMIT slots with
 * Qundef.  Returns TRUE when a slot was invalidated. */
static int
invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
{
    int i;

    for (i = 0; i < limit; i++) {
        if (chunk->data[i] == obj) {
            chunk->data[i] = Qundef;
            return TRUE;
        }
    }
    return FALSE;
}
/* Invalidate OBJ somewhere on the mark stack; only the top chunk is
 * partially filled (stack->index), lower chunks are full (stack->limit).
 * OBJ must be present — otherwise this is a GC bug. */
static void
invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
{
    int limit = stack->index;
    stack_chunk_t *chunk;

    for (chunk = stack->chunk; chunk != NULL; chunk = chunk->next) {
        if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
        limit = stack->limit;
    }
    rb_bug("invalid_mark_stack: unreachable");
}
# endif
2012-10-03 16:30:21 +04:00
/*
 * Initialize a mark stack: zero the whole structure, set the chunk
 * capacity, and pre-populate the chunk cache with a few chunks so the
 * first pushes need no allocation.
 */
static void
init_mark_stack(mark_stack_t *stack)
{
    int i;

    /* MEMZERO clears every field, including cache, cache_size and
     * unused_cache_size — the previous explicit `cache_size = 0` right
     * after it was redundant and has been removed. */
    MEMZERO(stack, mark_stack_t, 1);
    stack->index = stack->limit = STACK_CHUNK_SIZE;

    for (i = 0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
    }
    stack->unused_cache_size = stack->cache_size;
}
2013-07-18 03:19:38 +04:00
/* Marking */
1998-01-16 15:13:05 +03:00
2012-08-05 14:39:37 +04:00
# ifdef __ia64
2017-09-10 18:49:45 +03:00
# define SET_STACK_END (SET_MACHINE_STACK_END(&ec->machine.stack_end), ec->machine.register_stack_end = rb_ia64_bsp())
2012-08-05 14:39:37 +04:00
# else
2017-09-10 18:49:45 +03:00
# define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2012-08-05 14:39:37 +04:00
# endif
1998-01-16 15:13:05 +03:00
2017-09-10 18:49:45 +03:00
# define STACK_START (ec->machine.stack_start)
# define STACK_END (ec->machine.stack_end)
# define STACK_LEVEL_MAX (ec->machine.stack_maxsize / sizeof(VALUE))
2007-11-20 06:16:53 +03:00
2018-02-15 04:59:17 +03:00
# ifdef __EMSCRIPTEN__
# undef STACK_GROW_DIRECTION
# define STACK_GROW_DIRECTION 1
# endif
2012-08-05 14:39:37 +04:00
# if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
# elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
# else
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
: ( size_t ) ( STACK_END - STACK_START + 1 ) )
# endif
# if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction ;
/* Detect at runtime whether the machine stack grows up (+1) or down (-1)
 * by comparing a deeper frame's stack end against ADDR; caches the result
 * in ruby_stack_grow_direction and returns it. */
int
ruby_get_stack_grow_direction(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    return ruby_stack_grow_direction = (end > addr) ? 1 : -1;
}
# endif
1998-01-16 15:13:05 +03:00
2012-08-05 14:39:37 +04:00
/* Current machine-stack depth in VALUEs for the running execution
 * context; when P is non-NULL it receives the higher end of the stack. */
size_t
ruby_stack_length(VALUE **p)
{
    rb_execution_context_t *ec = GET_EC();
    SET_STACK_END;
    if (p) {
        *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    }
    return STACK_LENGTH;
}
2008-03-16 03:23:43 +03:00
2017-08-21 17:15:31 +03:00
# define PREVENT_STACK_OVERFLOW 1
2017-04-17 03:10:45 +03:00
# ifndef PREVENT_STACK_OVERFLOW
2012-10-03 16:30:21 +04:00
# if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2017-04-17 03:10:45 +03:00
# define PREVENT_STACK_OVERFLOW 1
# else
# define PREVENT_STACK_OVERFLOW 0
# endif
# endif
# if PREVENT_STACK_OVERFLOW
2012-08-05 14:39:37 +04:00
static int
2017-10-29 16:49:45 +03:00
stack_check ( rb_execution_context_t * ec , int water_mark )
2012-08-05 14:39:37 +04:00
{
int ret ;
SET_STACK_END ;
ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark ;
# ifdef __ia64
if ( ! ret ) {
2017-09-10 18:49:45 +03:00
ret = ( VALUE * ) rb_ia64_bsp ( ) - ec - > machine . register_stack_start >
ec - > machine . register_stack_maxsize / sizeof ( VALUE ) - water_mark ;
2012-08-05 14:39:37 +04:00
}
# endif
return ret ;
}
2017-04-17 03:10:45 +03:00
# else
2017-10-29 16:49:45 +03:00
# define stack_check(ec, water_mark) FALSE
2012-10-03 16:30:21 +04:00
# endif
2008-03-16 03:23:43 +03:00
2017-08-28 19:05:11 +03:00
# define STACKFRAME_FOR_CALL_CFUNC 838
2012-08-05 14:39:37 +04:00
mjit_compile.c: merge initial JIT compiler
which has been developed by Takashi Kokubun <takashikkbn@gmail> as
YARV-MJIT. Many of its bugs are fixed by wanabe <s.wanabe@gmail.com>.
This JIT compiler is designed to be a safe migration path to introduce
JIT compiler to MRI. So this commit does not include any bytecode
changes or dynamic instruction modifications, which are done in original
MJIT.
This commit even strips off some aggressive optimizations from
YARV-MJIT, and thus it's slower than YARV-MJIT too. But it's still
fairly faster than Ruby 2.5 in some benchmarks (attached below).
Note that this JIT compiler passes `make test`, `make test-all`, `make
test-spec` without JIT, and even with JIT. Not only it's perfectly safe
with JIT disabled because it does not replace VM instructions unlike
MJIT, but also with JIT enabled it stably runs Ruby applications
including Rails applications.
I'm expecting this version as just "initial" JIT compiler. I have many
optimization ideas which are skipped for initial merging, and you may
easily replace this JIT compiler with a faster one by just replacing
mjit_compile.c. `mjit_compile` interface is designed for the purpose.
common.mk: update dependencies for mjit_compile.c.
internal.h: declare `rb_vm_insn_addr2insn` for MJIT.
vm.c: exclude some definitions if `-DMJIT_HEADER` is provided to
compiler. This avoids to include some functions which take a long time
to compile, e.g. vm_exec_core. Some of the purpose is achieved in
transform_mjit_header.rb (see `IGNORED_FUNCTIONS`) but others are
manually resolved for now. Load mjit_helper.h for MJIT header.
mjit_helper.h: New. This is a file used only by JIT-ed code. I'll
refactor `mjit_call_cfunc` later.
vm_eval.c: add some #ifdef switches to skip compiling some functions
like Init_vm_eval.
win32/mkexports.rb: export thread/ec functions, which are used by MJIT.
include/ruby/defines.h: add MJIT_FUNC_EXPORTED macro alias to clarify
that a function is exported only for MJIT.
array.c: export a function used by MJIT.
bignum.c: ditto.
class.c: ditto.
compile.c: ditto.
error.c: ditto.
gc.c: ditto.
hash.c: ditto.
iseq.c: ditto.
numeric.c: ditto.
object.c: ditto.
proc.c: ditto.
re.c: ditto.
st.c: ditto.
string.c: ditto.
thread.c: ditto.
variable.c: ditto.
vm_backtrace.c: ditto.
vm_insnhelper.c: ditto.
vm_method.c: ditto.
I would like to improve maintainability of function exports, but I
believe this way is acceptable as initial merging if we clarify the
new exports are for MJIT (so that we can use them as TODO list to fix)
and add unit tests to detect unresolved symbols.
I'll add unit tests of JIT compilations in succeeding commits.
Author: Takashi Kokubun <takashikkbn@gmail.com>
Contributor: wanabe <s.wanabe@gmail.com>
Part of [Feature #14235]
---
* Known issues
* Code generated by gcc is faster than clang. The benchmark may be worse
in macOS. Following benchmark result is provided by gcc w/ Linux.
* Performance is decreased when Google Chrome is running
* JIT can work on MinGW, but it doesn't improve performance at least
in short running benchmark.
* Currently it doesn't perform well with Rails. We'll try to fix this
before release.
---
* Benchmark results
Benchmarked with:
Intel 4.0GHz i7-4790K with 16GB memory under x86-64 Ubuntu 8 Cores
- 2.0.0-p0: Ruby 2.0.0-p0
- r62186: Ruby trunk (early 2.6.0), before MJIT changes
- JIT off: On this commit, but without `--jit` option
- JIT on: On this commit, and with `--jit` option
** Optcarrot fps
Benchmark: https://github.com/mame/optcarrot
| |2.0.0-p0 |r62186 |JIT off |JIT on |
|:--------|:--------|:--------|:--------|:--------|
|fps |37.32 |51.46 |51.31 |58.88 |
|vs 2.0.0 |1.00x |1.38x |1.37x |1.58x |
** MJIT benchmarks
Benchmark: https://github.com/benchmark-driver/mjit-benchmarks
(Original: https://github.com/vnmakarov/ruby/tree/rtl_mjit_branch/MJIT-benchmarks)
| |2.0.0-p0 |r62186 |JIT off |JIT on |
|:----------|:--------|:--------|:--------|:--------|
|aread |1.00 |1.09 |1.07 |2.19 |
|aref |1.00 |1.13 |1.11 |2.22 |
|aset |1.00 |1.50 |1.45 |2.64 |
|awrite |1.00 |1.17 |1.13 |2.20 |
|call |1.00 |1.29 |1.26 |2.02 |
|const2 |1.00 |1.10 |1.10 |2.19 |
|const |1.00 |1.11 |1.10 |2.19 |
|fannk |1.00 |1.04 |1.02 |1.00 |
|fib |1.00 |1.32 |1.31 |1.84 |
|ivread |1.00 |1.13 |1.12 |2.43 |
|ivwrite |1.00 |1.23 |1.21 |2.40 |
|mandelbrot |1.00 |1.13 |1.16 |1.28 |
|meteor |1.00 |2.97 |2.92 |3.17 |
|nbody |1.00 |1.17 |1.15 |1.49 |
|nest-ntimes|1.00 |1.22 |1.20 |1.39 |
|nest-while |1.00 |1.10 |1.10 |1.37 |
|norm |1.00 |1.18 |1.16 |1.24 |
|nsvb |1.00 |1.16 |1.16 |1.17 |
|red-black |1.00 |1.02 |0.99 |1.12 |
|sieve |1.00 |1.30 |1.28 |1.62 |
|trees |1.00 |1.14 |1.13 |1.19 |
|while |1.00 |1.12 |1.11 |2.41 |
** Discourse's script/bench.rb
Benchmark: https://github.com/discourse/discourse/blob/v1.8.7/script/bench.rb
NOTE: Rails performance was somehow a little degraded with JIT for now.
We should fix this.
(At least I know opt_aref is performing badly in JIT and I have an idea
to fix it. Please wait for the fix.)
*** JIT off
Your Results: (note for timings- percentile is first, duration is second in millisecs)
categories_admin:
50: 17
75: 18
90: 22
99: 29
home_admin:
50: 21
75: 21
90: 27
99: 40
topic_admin:
50: 17
75: 18
90: 22
99: 32
categories:
50: 35
75: 41
90: 43
99: 77
home:
50: 39
75: 46
90: 49
99: 95
topic:
50: 46
75: 52
90: 56
99: 101
*** JIT on
Your Results: (note for timings- percentile is first, duration is second in millisecs)
categories_admin:
50: 19
75: 21
90: 25
99: 33
home_admin:
50: 24
75: 26
90: 30
99: 35
topic_admin:
50: 19
75: 20
90: 25
99: 30
categories:
50: 40
75: 44
90: 48
99: 76
home:
50: 42
75: 48
90: 51
99: 89
topic:
50: 49
75: 55
90: 58
99: 99
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62197 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 14:22:28 +03:00
MJIT_FUNC_EXPORTED int
2017-10-29 16:49:45 +03:00
rb_ec_stack_check ( rb_execution_context_t * ec )
2017-04-17 03:10:47 +03:00
{
2017-10-29 16:49:45 +03:00
return stack_check ( ec , STACKFRAME_FOR_CALL_CFUNC ) ;
2017-04-17 03:10:47 +03:00
}
2012-08-05 14:39:37 +04:00
int
ruby_stack_check ( void )
{
2017-10-29 16:49:45 +03:00
return stack_check ( GET_EC ( ) , STACKFRAME_FOR_CALL_CFUNC ) ;
2012-08-05 14:39:37 +04:00
}
2018-11-07 07:56:24 +03:00
/* Conservatively mark N machine words starting at X: each word that
 * looks like a live object reference is marked via gc_mark_maybe.
 * ASAN is disabled here because the words scanned may be uninitialized
 * stack slots. */
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n));
static void
mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
{
    while (n-- > 0) {
        gc_mark_maybe(objspace, *x);
        x++;
    }
}
2012-08-05 14:39:37 +04:00
static void
2014-06-18 10:16:39 +04:00
gc_mark_locations ( rb_objspace_t * objspace , const VALUE * start , const VALUE * end )
2012-08-05 14:39:37 +04:00
{
long n ;
1998-01-16 15:13:05 +03:00
2012-08-05 14:39:37 +04:00
if ( end < = start ) return ;
n = end - start ;
mark_locations_array ( objspace , start , n ) ;
}
void
2014-06-18 10:16:39 +04:00
rb_gc_mark_locations ( const VALUE * start , const VALUE * end )
2008-07-27 09:59:32 +04:00
{
2012-08-05 14:39:37 +04:00
gc_mark_locations ( & rb_objspace , start , end ) ;
}
2012-01-07 18:02:23 +04:00
2016-07-28 22:13:26 +03:00
/* Exactly mark each of the N VALUEs in VALUES (no conservative guessing). */
static void
gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
{
    long idx;

    for (idx = 0; idx < n; idx++) {
        gc_mark(objspace, values[idx]);
    }
}
2016-07-28 22:13:26 +03:00
void
rb_gc_mark_values ( long n , const VALUE * values )
{
rb_objspace_t * objspace = & rb_objspace ;
gc_mark_values ( objspace , n , values ) ;
}
2012-08-05 14:39:37 +04:00
/* st_foreach callback: mark the table VALUE (key ignored); DATA carries
 * the objspace pointer. */
static int
mark_entry(st_data_t key, st_data_t value, st_data_t data)
{
    gc_mark((rb_objspace_t *)data, (VALUE)value);
    return ST_CONTINUE;
}
2004-09-27 16:25:21 +04:00
static void
2012-10-03 16:30:21 +04:00
mark_tbl ( rb_objspace_t * objspace , st_table * tbl )
2004-09-27 16:25:21 +04:00
{
2012-08-05 14:39:37 +04:00
if ( ! tbl | | tbl - > num_entries = = 0 ) return ;
2015-06-06 02:00:22 +03:00
st_foreach ( tbl , mark_entry , ( st_data_t ) objspace ) ;
2004-09-27 16:25:21 +04:00
}
2012-08-05 14:39:37 +04:00
/* st_foreach callback: mark the table KEY (value ignored); DATA carries
 * the objspace pointer. */
static int
mark_key(st_data_t key, st_data_t value, st_data_t data)
{
    gc_mark((rb_objspace_t *)data, (VALUE)key);
    return ST_CONTINUE;
}
2008-07-05 11:15:41 +04:00
static void
2012-10-03 16:30:21 +04:00
mark_set ( rb_objspace_t * objspace , st_table * tbl )
2004-09-27 16:25:21 +04:00
{
2012-08-05 14:39:37 +04:00
if ( ! tbl ) return ;
2015-06-06 02:00:22 +03:00
st_foreach ( tbl , mark_key , ( st_data_t ) objspace ) ;
2012-08-05 14:39:37 +04:00
}
2008-07-02 04:49:10 +04:00
2012-08-05 14:39:37 +04:00
/* Public wrapper: mark the keys of TBL against the global objspace. */
void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl);
}
/* st_foreach callback: mark both KEY and VALUE; DATA carries the
 * objspace pointer. */
static int
mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}
1998-01-16 15:13:05 +03:00
2012-01-07 18:02:23 +04:00
static void
2018-10-31 01:11:51 +03:00
mark_hash ( rb_objspace_t * objspace , VALUE hash )
{
rb_hash_stlike_foreach ( hash , mark_keyvalue , ( st_data_t ) objspace ) ;
2018-12-14 04:10:15 +03:00
if ( RHASH_AR_TABLE_P ( hash ) ) {
2018-10-31 01:11:51 +03:00
if ( objspace - > mark_func_data = = NULL & & RHASH_TRANSIENT_P ( hash ) ) {
2018-12-14 04:10:15 +03:00
rb_transient_heap_mark ( hash , RHASH_AR_TABLE ( hash ) ) ;
2018-10-31 01:11:51 +03:00
}
}
else {
VM_ASSERT ( ! RHASH_TRANSIENT_P ( hash ) ) ;
}
gc_mark ( objspace , RHASH ( hash ) - > ifnone ) ;
}
/* Mark every key and value stored in a raw st_table; no-op for NULL. */
static void
mark_st(rb_objspace_t *objspace, st_table *tbl)
{
    if (tbl == NULL) return;
    st_foreach(tbl, mark_keyvalue, (st_data_t)objspace);
}
/* Public wrapper: mark keys and values of TBL against the global objspace. */
void
rb_mark_hash(st_table *tbl)
{
    mark_st(&rb_objspace, tbl);
}
2008-07-02 04:49:10 +04:00
static void
2012-10-03 16:30:21 +04:00
mark_method_entry ( rb_objspace_t * objspace , const rb_method_entry_t * me )
2008-07-02 04:49:10 +04:00
{
2012-08-05 14:39:37 +04:00
const rb_method_definition_t * def = me - > def ;
2008-07-27 09:59:32 +04:00
* method.h: introduce rb_callable_method_entry_t to remove
rb_control_frame_t::klass.
[Bug #11278], [Bug #11279]
rb_method_entry_t data belong to modules/classes.
rb_method_entry_t::owner points defined module or class.
module M
def foo; end
end
In this case, owner is M.
rb_callable_method_entry_t data belong to only classes.
For modules, MRI creates corresponding T_ICLASS internally.
rb_callable_method_entry_t can also belong to T_ICLASS.
rb_callable_method_entry_t::defined_class points T_CLASS or
T_ICLASS.
rb_method_entry_t data for classes (not for modules) are also
rb_callable_method_entry_t data because it is completely same data.
In this case, rb_method_entry_t::owner == rb_method_entry_t::defined_class.
For example, there are classes C and D, and each includes M,
class C; include M; end
class D; include M; end
then, two T_ICLASS objects for C's super class and D's super class
will be created.
When C.new.foo is called, then M#foo is searcheed and
rb_callable_method_t data is used by VM to invoke M#foo.
rb_method_entry_t data is only one for M#foo.
However, rb_callable_method_entry_t data are two (and can be more).
It is proportional to the number of including (and prepending)
classes (the number of T_ICLASS which point to the module).
Now, created rb_callable_method_entry_t are collected when
the original module M was modified. We can think it is a cache.
We need to select what kind of method entry data is needed.
To operate definition, then you need to use rb_method_entry_t.
You can access them by the following functions.
* rb_method_entry(VALUE klass, ID id);
* rb_method_entry_with_refinements(VALUE klass, ID id);
* rb_method_entry_without_refinements(VALUE klass, ID id);
* rb_resolve_refined_method(VALUE refinements, const rb_method_entry_t *me);
To invoke methods, then you need to use rb_callable_method_entry_t
which you can get by the following APIs corresponding to the
above listed functions.
* rb_callable_method_entry(VALUE klass, ID id);
* rb_callable_method_entry_with_refinements(VALUE klass, ID id);
* rb_callable_method_entry_without_refinements(VALUE klass, ID id);
* rb_resolve_refined_method_callable(VALUE refinements, const rb_callable_method_entry_t *me);
VM pushes rb_callable_method_entry_t, so that rb_vm_frame_method_entry()
returns rb_callable_method_entry_t.
You can check a super class of current method by
rb_callable_method_entry_t::defined_class.
* method.h: renamed from rb_method_entry_t::klass to
rb_method_entry_t::owner.
* internal.h: add rb_classext_struct::callable_m_tbl to cache
rb_callable_method_entry_t data.
We need to consider abotu this field again because it is only
active for T_ICLASS.
* class.c (method_entry_i): ditto.
* class.c (rb_define_attr): rb_method_entry() does not takes
defiend_class_ptr.
* gc.c (mark_method_entry): mark RCLASS_CALLABLE_M_TBL() for T_ICLASS.
* cont.c (fiber_init): rb_control_frame_t::klass is removed.
* proc.c: fix `struct METHOD' data structure because
rb_callable_method_t has all information.
* vm_core.h: remove several fields.
* rb_control_frame_t::klass.
* rb_block_t::klass.
And catch up changes.
* eval.c: catch up changes.
* gc.c: ditto.
* insns.def: ditto.
* vm.c: ditto.
* vm_args.c: ditto.
* vm_backtrace.c: ditto.
* vm_dump.c: ditto.
* vm_eval.c: ditto.
* vm_insnhelper.c: ditto.
* vm_method.c: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51126 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-07-03 14:24:50 +03:00
gc_mark ( objspace , me - > owner ) ;
gc_mark ( objspace , me - > defined_class ) ;
* fix namespace issue on singleton class expressions. [Bug #10943]
* vm_core.h, method.h: remove rb_iseq_t::cref_stack. CREF is stored
to rb_method_definition_t::body.iseq_body.cref.
* vm_insnhelper.c: modify SVAR usage.
When calling ISEQ type method, push CREF information onto method
frame, SVAR located place. Before this fix, SVAR is simply nil.
After this patch, CREF (or NULL == Qfalse for not iseq methods)
is stored at the method invocation.
When SVAR is requierd, then put NODE_IF onto SVAR location,
and NDOE_IF::nd_reserved points CREF itself.
* vm.c (vm_cref_new, vm_cref_dump, vm_cref_new_toplevel): added.
* vm_insnhelper.c (vm_push_frame): accept CREF.
* method.h, vm_method.c (rb_add_method_iseq): added. This function
accepts iseq and CREF.
* class.c (clone_method): use rb_add_method_iseq().
* gc.c (mark_method_entry): mark method_entry::body.iseq_body.cref.
* iseq.c: remove CREF related codes.
* insns.def (getinlinecache/setinlinecache): CREF should be cache key
because a different CREF has a different namespace.
* node.c (rb_gc_mark_node): mark NODE_IF::nd_reserved for SVAR.
* proc.c: catch up changes.
* struct.c: ditto.
* insns.def: ditto.
* vm_args.c (raise_argument_error): ditto.
* vm_eval.c: ditto.
* test/ruby/test_class.rb: add a test.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@49874 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-03-06 15:24:58 +03:00
2015-06-25 10:59:23 +03:00
if ( def ) {
switch ( def - > type ) {
case VM_METHOD_TYPE_ISEQ :
2015-07-22 01:52:59 +03:00
if ( def - > body . iseq . iseqptr ) gc_mark ( objspace , ( VALUE ) def - > body . iseq . iseqptr ) ;
2015-06-25 10:59:23 +03:00
gc_mark ( objspace , ( VALUE ) def - > body . iseq . cref ) ;
break ;
case VM_METHOD_TYPE_ATTRSET :
case VM_METHOD_TYPE_IVAR :
gc_mark ( objspace , def - > body . attr . location ) ;
break ;
case VM_METHOD_TYPE_BMETHOD :
2018-11-26 21:16:54 +03:00
gc_mark ( objspace , def - > body . bmethod . proc ) ;
2018-11-26 21:16:39 +03:00
if ( def - > body . bmethod . hooks ) rb_hook_list_mark ( def - > body . bmethod . hooks ) ;
2015-06-25 10:59:23 +03:00
break ;
case VM_METHOD_TYPE_ALIAS :
gc_mark ( objspace , ( VALUE ) def - > body . alias . original_me ) ;
return ;
case VM_METHOD_TYPE_REFINED :
gc_mark ( objspace , ( VALUE ) def - > body . refined . orig_me ) ;
* method.h: introduce rb_callable_method_entry_t to remove
rb_control_frame_t::klass.
[Bug #11278], [Bug #11279]
rb_method_entry_t data belong to modules/classes.
rb_method_entry_t::owner points defined module or class.
module M
def foo; end
end
In this case, owner is M.
rb_callable_method_entry_t data belong to only classes.
For modules, MRI creates corresponding T_ICLASS internally.
rb_callable_method_entry_t can also belong to T_ICLASS.
rb_callable_method_entry_t::defined_class points T_CLASS or
T_ICLASS.
rb_method_entry_t data for classes (not for modules) are also
rb_callable_method_entry_t data because it is completely same data.
In this case, rb_method_entry_t::owner == rb_method_entry_t::defined_class.
For example, there are classes C and D, and incldues M,
class C; include M; end
class D; include M; end
then, two T_ICLASS objects for C's super class and D's super class
will be created.
When C.new.foo is called, then M#foo is searcheed and
rb_callable_method_t data is used by VM to invoke M#foo.
rb_method_entry_t data is only one for M#foo.
However, rb_callable_method_entry_t data are two (and can be more).
It is proportional to the number of including (and prepending)
classes (the number of T_ICLASS which point to the module).
Now, created rb_callable_method_entry_t are collected when
the original module M was modified. We can think it is a cache.
We need to select what kind of method entry data is needed.
To operate definition, then you need to use rb_method_entry_t.
You can access them by the following functions.
* rb_method_entry(VALUE klass, ID id);
* rb_method_entry_with_refinements(VALUE klass, ID id);
* rb_method_entry_without_refinements(VALUE klass, ID id);
* rb_resolve_refined_method(VALUE refinements, const rb_method_entry_t *me);
To invoke methods, then you need to use rb_callable_method_entry_t
which you can get by the following APIs corresponding to the
above listed functions.
* rb_callable_method_entry(VALUE klass, ID id);
* rb_callable_method_entry_with_refinements(VALUE klass, ID id);
* rb_callable_method_entry_without_refinements(VALUE klass, ID id);
* rb_resolve_refined_method_callable(VALUE refinements, const rb_callable_method_entry_t *me);
VM pushes rb_callable_method_entry_t, so that rb_vm_frame_method_entry()
returns rb_callable_method_entry_t.
You can check a super class of current method by
rb_callable_method_entry_t::defined_class.
* method.h: renamed from rb_method_entry_t::klass to
rb_method_entry_t::owner.
* internal.h: add rb_classext_struct::callable_m_tbl to cache
rb_callable_method_entry_t data.
We need to consider abotu this field again because it is only
active for T_ICLASS.
* class.c (method_entry_i): ditto.
* class.c (rb_define_attr): rb_method_entry() does not takes
defiend_class_ptr.
* gc.c (mark_method_entry): mark RCLASS_CALLABLE_M_TBL() for T_ICLASS.
* cont.c (fiber_init): rb_control_frame_t::klass is removed.
* proc.c: fix `struct METHOD' data structure because
rb_callable_method_t has all information.
* vm_core.h: remove several fields.
* rb_control_frame_t::klass.
* rb_block_t::klass.
And catch up changes.
* eval.c: catch up changes.
* gc.c: ditto.
* insns.def: ditto.
* vm.c: ditto.
* vm_args.c: ditto.
* vm_backtrace.c: ditto.
* vm_dump.c: ditto.
* vm_eval.c: ditto.
* vm_insnhelper.c: ditto.
* vm_method.c: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51126 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-07-03 14:24:50 +03:00
gc_mark ( objspace , ( VALUE ) def - > body . refined . owner ) ;
2015-06-25 10:59:23 +03:00
break ;
case VM_METHOD_TYPE_CFUNC :
case VM_METHOD_TYPE_ZSUPER :
case VM_METHOD_TYPE_MISSING :
case VM_METHOD_TYPE_OPTIMIZED :
case VM_METHOD_TYPE_UNDEF :
case VM_METHOD_TYPE_NOTIMPLEMENTED :
break ;
}
1998-01-16 15:13:05 +03:00
}
2012-08-05 14:39:37 +04:00
}
2008-07-02 04:49:10 +04:00
2015-08-12 11:43:55 +03:00
/* id_table iterator callback: mark one method-entry value and continue. */
static enum rb_id_table_iterator_result
mark_method_entry_i(VALUE me, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, me);
    return ID_TABLE_CONTINUE;
}
static void
2015-08-12 11:43:55 +03:00
mark_m_tbl ( rb_objspace_t * objspace , struct rb_id_table * tbl )
2012-08-05 14:39:37 +04:00
{
2015-03-06 01:20:14 +03:00
if ( tbl ) {
2015-08-12 11:43:55 +03:00
rb_id_table_foreach_values ( tbl , mark_method_entry_i , objspace ) ;
2013-12-03 12:11:07 +04:00
}
2010-05-28 15:13:42 +04:00
}
2008-07-05 11:15:41 +04:00
use id_table for constant tables
valgrind 3.9.0 on x86-64 reports a minor reduction in memory usage
when loading only RubyGems and RDoc by running: ruby -rrdoc -eexit
before: HEAP SUMMARY:
in use at exit: 2,913,448 bytes in 27,394 blocks
total heap usage: 48,362 allocs, 20,968 frees, 9,034,621 bytes alloc
after: HEAP SUMMARY:
in use at exit: 2,880,056 bytes in 26,712 blocks
total heap usage: 47,791 allocs, 21,079 frees, 9,046,507 bytes alloc
* class.c (struct clone_const_arg): adjust for id_table
(clone_const): ditto
(clone_const_i): ditto
(rb_mod_init_copy): ditto
(rb_singleton_class_clone_and_attach): ditto
(rb_include_class_new): ditto
(include_modules_at): ditto
* constant.h (rb_free_const_table): ditto
* gc.c (free_const_entry_i): ditto
(rb_free_const_table): ditto
(obj_memsize_of): ditto
(mark_const_entry_i): ditto
(mark_const_tbl): ditto
* internal.h (struct rb_classext_struct): ditto
* object.c (rb_mod_const_set): resolve class name on assignment
* variable.c (const_update): replace with const_tbl_update
(const_tbl_update): new function
(fc_i): adjust for id_table
(find_class_path): ditto
(autoload_const_set): st_update => const_tbl_update
(rb_const_remove): adjust for id_table
(sv_i): ditto
(rb_local_constants_i): ditto
(rb_local_constants): ditto
(rb_mod_const_at): ditto
(rb_mod_const_set): ditto
(rb_const_lookup): ditto
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53376 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-12-29 23:19:14 +03:00
/* id_table iterator callback: mark the value and defining file of one
 * constant entry, then continue iteration. */
static enum rb_id_table_iterator_result
mark_const_entry_i(VALUE value, void *data)
{
    const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
    rb_objspace_t *objspace = data;

    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);
    return ID_TABLE_CONTINUE;
}
2008-07-27 17:08:02 +04:00
2010-05-28 15:13:42 +04:00
static void
use id_table for constant tables
valgrind 3.9.0 on x86-64 reports a minor reduction in memory usage
when loading only RubyGems and RDoc by running: ruby -rrdoc -eexit
before: HEAP SUMMARY:
in use at exit: 2,913,448 bytes in 27,394 blocks
total heap usage: 48,362 allocs, 20,968 frees, 9,034,621 bytes alloc
after: HEAP SUMMARY:
in use at exit: 2,880,056 bytes in 26,712 blocks
total heap usage: 47,791 allocs, 21,079 frees, 9,046,507 bytes alloc
* class.c (struct clone_const_arg): adjust for id_table
(clone_const): ditto
(clone_const_i): ditto
(rb_mod_init_copy): ditto
(rb_singleton_class_clone_and_attach): ditto
(rb_include_class_new): ditto
(include_modules_at): ditto
* constant.h (rb_free_const_table): ditto
* gc.c (free_const_entry_i): ditto
(rb_free_const_table): ditto
(obj_memsize_of): ditto
(mark_const_entry_i): ditto
(mark_const_tbl): ditto
* internal.h (struct rb_classext_struct): ditto
* object.c (rb_mod_const_set): resolve class name on assignment
* variable.c (const_update): replace with const_tbl_update
(const_tbl_update): new function
(fc_i): adjust for id_table
(find_class_path): ditto
(autoload_const_set): st_update => const_tbl_update
(rb_const_remove): adjust for id_table
(sv_i): ditto
(rb_local_constants_i): ditto
(rb_local_constants): ditto
(rb_mod_const_at): ditto
(rb_mod_const_set): ditto
(rb_const_lookup): ditto
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53376 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-12-29 23:19:14 +03:00
mark_const_tbl ( rb_objspace_t * objspace , struct rb_id_table * tbl )
2010-05-28 15:13:42 +04:00
{
2012-08-05 14:39:37 +04:00
if ( ! tbl ) return ;
use id_table for constant tables
valgrind 3.9.0 on x86-64 reports a minor reduction in memory usage
when loading only RubyGems and RDoc by running: ruby -rrdoc -eexit
before: HEAP SUMMARY:
in use at exit: 2,913,448 bytes in 27,394 blocks
total heap usage: 48,362 allocs, 20,968 frees, 9,034,621 bytes alloc
after: HEAP SUMMARY:
in use at exit: 2,880,056 bytes in 26,712 blocks
total heap usage: 47,791 allocs, 21,079 frees, 9,046,507 bytes alloc
* class.c (struct clone_const_arg): adjust for id_table
(clone_const): ditto
(clone_const_i): ditto
(rb_mod_init_copy): ditto
(rb_singleton_class_clone_and_attach): ditto
(rb_include_class_new): ditto
(include_modules_at): ditto
* constant.h (rb_free_const_table): ditto
* gc.c (free_const_entry_i): ditto
(rb_free_const_table): ditto
(obj_memsize_of): ditto
(mark_const_entry_i): ditto
(mark_const_tbl): ditto
* internal.h (struct rb_classext_struct): ditto
* object.c (rb_mod_const_set): resolve class name on assignment
* variable.c (const_update): replace with const_tbl_update
(const_tbl_update): new function
(fc_i): adjust for id_table
(find_class_path): ditto
(autoload_const_set): st_update => const_tbl_update
(rb_const_remove): adjust for id_table
(sv_i): ditto
(rb_local_constants_i): ditto
(rb_local_constants): ditto
(rb_mod_const_at): ditto
(rb_mod_const_set): ditto
(rb_const_lookup): ditto
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53376 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-12-29 23:19:14 +03:00
rb_id_table_foreach_values ( tbl , mark_const_entry_i , objspace ) ;
2010-05-28 15:13:42 +04:00
}
2012-08-05 14:39:37 +04:00
# if STACK_GROW_DIRECTION < 0
# define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
# elif STACK_GROW_DIRECTION > 0
# define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
# else
# define GET_STACK_BOUNDS(start, end, appendix) \
( ( STACK_END < STACK_START ) ? \
( ( start ) = STACK_END , ( end ) = STACK_START ) : ( ( start ) = STACK_START , ( end ) = STACK_END + ( appendix ) ) )
# endif
2017-09-10 18:49:45 +03:00
static void mark_stack_locations ( rb_objspace_t * objspace , const rb_execution_context_t * ec ,
2016-03-15 10:02:09 +03:00
const VALUE * stack_start , const VALUE * stack_end ) ;
2010-05-28 15:13:42 +04:00
static void
2017-09-10 18:49:45 +03:00
mark_current_machine_context ( rb_objspace_t * objspace , rb_execution_context_t * ec )
2010-05-28 15:13:42 +04:00
{
2012-08-05 14:39:37 +04:00
union {
rb_jmp_buf j ;
VALUE v [ sizeof ( rb_jmp_buf ) / sizeof ( VALUE ) ] ;
} save_regs_gc_mark ;
VALUE * stack_start , * stack_end ;
2010-05-28 15:13:42 +04:00
2012-08-05 14:39:37 +04:00
FLUSH_REGISTER_WINDOWS ;
/* This assumes that all registers are saved into the jmp_buf (and stack) */
rb_setjmp ( save_regs_gc_mark . j ) ;
2010-06-06 08:12:20 +04:00
2014-04-09 18:32:01 +04:00
/* SET_STACK_END must be called in this function because
* the stack frame of this function may contain
* callee save registers and they should be marked . */
2014-04-09 18:30:46 +04:00
SET_STACK_END ;
2012-08-05 14:39:37 +04:00
GET_STACK_BOUNDS ( stack_start , stack_end , 1 ) ;
2008-07-02 04:49:10 +04:00
2012-08-05 14:39:37 +04:00
mark_locations_array ( objspace , save_regs_gc_mark . v , numberof ( save_regs_gc_mark . v ) ) ;
2017-09-10 18:49:45 +03:00
mark_stack_locations ( objspace , ec , stack_start , stack_end ) ;
2010-05-28 15:13:42 +04:00
}
2012-08-05 14:39:37 +04:00
void
2017-09-10 18:49:45 +03:00
rb_gc_mark_machine_stack ( const rb_execution_context_t * ec )
2010-05-28 15:13:42 +04:00
{
2017-09-10 18:49:45 +03:00
rb_objspace_t * objspace = & rb_objspace ;
2012-08-05 14:39:37 +04:00
VALUE * stack_start , * stack_end ;
2010-06-29 07:11:05 +04:00
2012-08-05 14:39:37 +04:00
GET_STACK_BOUNDS ( stack_start , stack_end , 0 ) ;
2017-09-10 18:49:45 +03:00
mark_stack_locations ( objspace , ec , stack_start , stack_end ) ;
2016-03-15 10:02:09 +03:00
}
static void
2017-09-10 18:49:45 +03:00
mark_stack_locations ( rb_objspace_t * objspace , const rb_execution_context_t * ec ,
2016-03-15 10:02:09 +03:00
const VALUE * stack_start , const VALUE * stack_end )
{
2016-03-15 09:42:27 +03:00
gc_mark_locations ( objspace , stack_start , stack_end ) ;
2012-08-05 14:39:37 +04:00
# ifdef __ia64
2016-03-15 09:42:27 +03:00
gc_mark_locations ( objspace ,
2017-09-10 18:49:45 +03:00
ec - > machine . register_stack_start ,
ec - > machine . register_stack_end ) ;
2012-08-05 14:39:37 +04:00
# endif
2014-10-21 06:50:23 +04:00
# if defined(__mc68000__)
2016-03-15 09:42:27 +03:00
gc_mark_locations ( objspace ,
( VALUE * ) ( ( char * ) stack_start + 2 ) ,
( VALUE * ) ( ( char * ) stack_end - 2 ) ) ;
2014-10-21 06:50:23 +04:00
# endif
2010-05-28 15:13:42 +04:00
}
2012-08-05 14:39:37 +04:00
/* Public API: mark an st_table's contents against the global objspace. */
void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl);
}
2013-06-17 06:54:25 +04:00
/* Mark `obj` only if it actually points into the GC heap — used for
 * conservative (stack/register) scanning where any word might be a VALUE. */
static void
gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
{
    /* tell Valgrind the word is intentionally read even if "uninitialized" */
    (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        int type;
        /* temporarily unpoison so BUILTIN_TYPE can inspect the slot under ASAN */
        void *poisoned = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
        unpoison_object(obj, false);

        type = BUILTIN_TYPE(obj);
        if (type != T_ZOMBIE && type != T_NONE) {
            gc_mark_ptr(objspace, obj);
        }

        /* restore the poison state if the slot was poisoned before */
        if (poisoned) {
            poison_object(obj);
        }
    }
}
2012-08-05 14:39:37 +04:00
/* Public API: mark `obj` only if it is a live pointer into the GC heap. */
void
rb_gc_mark_maybe(VALUE obj)
{
    gc_mark_maybe(&rb_objspace, obj);
}
2010-05-28 15:13:42 +04:00
2013-11-04 22:59:33 +04:00
static inline int
2014-09-11 14:23:36 +04:00
gc_mark_set ( rb_objspace_t * objspace , VALUE obj )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
2014-09-08 08:11:00 +04:00
if ( RVALUE_MARKED ( obj ) ) return 0 ;
MARK_IN_BITMAP ( GET_HEAP_MARK_BITS ( obj ) , obj ) ;
return 1 ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
2014-09-08 08:11:00 +04:00
#if USE_RGENGC
/* Record a write-barrier-unprotected (shady) object as uncollectible on its
 * heap page so generational GC keeps it alive.
 * Returns TRUE if the object was newly remembered, FALSE if it already was. */
static int
gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *uncollectible_bits = &page->uncollectible_bits[0];

    if (MARKED_IN_BITMAP(uncollectible_bits, obj)) {
        return FALSE;  /* already remembered */
    }

    page->flags.has_uncollectible_shady_objects = TRUE;
    MARK_IN_BITMAP(uncollectible_bits, obj);
    objspace->rgengc.uncollectible_wb_unprotected_objects++;

#if RGENGC_PROFILE > 0
    objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
    objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif

    return TRUE;
}
#endif
2010-05-28 15:13:42 +04:00
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
static void
2013-12-13 06:38:05 +04:00
rgengc_check_relation ( rb_objspace_t * objspace , VALUE obj )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
const VALUE old_parent = objspace - > rgengc . parent_object ;
if ( old_parent ) { /* parent object is old */
if ( RVALUE_WB_UNPROTECTED ( obj ) ) {
if ( gc_remember_unprotected ( objspace , obj ) ) {
gc_report ( 2 , objspace , " relation: (O->S) %s -> %s \n " , obj_info ( old_parent ) , obj_info ( obj ) ) ;
2013-11-04 22:59:33 +04:00
}
2013-05-24 14:21:04 +04:00
}
2013-11-04 22:59:33 +04:00
else {
2014-09-08 08:11:00 +04:00
if ( ! RVALUE_OLD_P ( obj ) ) {
if ( RVALUE_MARKED ( obj ) ) {
2013-11-04 22:59:33 +04:00
/* An object pointed from an OLD object should be OLD. */
2014-09-08 08:11:00 +04:00
gc_report ( 2 , objspace , " relation: (O->unmarked Y) %s -> %s \n " , obj_info ( old_parent ) , obj_info ( obj ) ) ;
RVALUE_AGE_SET_OLD ( objspace , obj ) ;
if ( is_incremental_marking ( objspace ) ) {
if ( ! RVALUE_MARKING ( obj ) ) {
gc_grey ( objspace , obj ) ;
}
}
else {
rgengc_remember ( objspace , obj ) ;
}
2013-11-04 22:59:33 +04:00
}
2014-09-08 08:11:00 +04:00
else {
gc_report ( 2 , objspace , " relation: (O->Y) %s -> %s \n " , obj_info ( old_parent ) , obj_info ( obj ) ) ;
RVALUE_AGE_SET_CANDIDATE ( objspace , obj ) ;
2013-11-04 22:59:33 +04:00
}
}
}
2014-09-08 08:11:00 +04:00
}
2017-06-22 08:03:18 +03:00
GC_ASSERT ( old_parent = = objspace - > rgengc . parent_object ) ;
2013-11-04 22:59:33 +04:00
# endif
2014-09-08 08:11:00 +04:00
}
/* Turn a marked object grey: push it onto the mark stack so its references
 * get traversed.  During incremental marking the object is also flagged in
 * the page's marking bitmap. */
static void
gc_grey(rb_objspace_t *objspace, VALUE obj)
{
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#endif

#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }
#endif

    push_mark_stack(&objspace->mark_stack, obj);
}
/* Advance the generational age of a freshly marked object and account it in
 * objspace->marked_slots.  Write-barrier-unprotected objects are never aged.
 * A young object has its age incremented; an already-old object reached during
 * a full (major) marking is registered as old/uncollectible on its page. */
static void
gc_aging(rb_objspace_t *objspace, VALUE obj)
{
#if USE_RGENGC
    struct heap_page *page = GET_HEAP_PAGE(obj);

    GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
    check_rvalue_consistency(obj);

    if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
        if (!RVALUE_OLD_P(obj)) {
            gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
            RVALUE_AGE_INC(objspace, obj);
        }
        else if (is_full_marking(objspace)) {
            GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
            RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
        }
    }
    check_rvalue_consistency(obj);
#endif /* USE_RGENGC */

    objspace->marked_slots++;
}
2015-10-29 12:48:38 +03:00
NOINLINE ( static void gc_mark_ptr ( rb_objspace_t * objspace , VALUE obj ) ) ;
2012-08-05 14:39:37 +04:00
static void
2014-09-11 14:23:36 +04:00
gc_mark_ptr ( rb_objspace_t * objspace , VALUE obj )
2012-08-05 14:39:37 +04:00
{
2014-09-08 08:11:00 +04:00
if ( LIKELY ( objspace - > mark_func_data = = NULL ) ) {
2015-06-24 21:38:36 +03:00
rgengc_check_relation ( objspace , obj ) ;
2014-09-11 14:23:36 +04:00
if ( ! gc_mark_set ( objspace , obj ) ) return ; /* already marked */
2018-12-23 18:02:17 +03:00
if ( RB_TYPE_P ( obj , T_NONE ) ) rb_bug ( " try to mark T_NONE object " ) ; /* check here will help debugging */
2014-09-08 08:11:00 +04:00
gc_aging ( objspace , obj ) ;
gc_grey ( objspace , obj ) ;
2012-10-05 12:14:09 +04:00
}
else {
2014-09-08 08:11:00 +04:00
objspace - > mark_func_data - > mark_func ( obj , objspace - > mark_func_data - > data ) ;
2012-10-05 12:14:09 +04:00
}
2012-08-05 14:39:37 +04:00
}
2010-05-28 15:13:42 +04:00
2016-07-26 12:57:50 +03:00
static inline void
2014-09-11 14:23:36 +04:00
gc_mark ( rb_objspace_t * objspace , VALUE obj )
{
if ( ! is_markable_object ( objspace , obj ) ) return ;
gc_mark_ptr ( objspace , obj ) ;
}
2012-08-05 14:39:37 +04:00
void
rb_gc_mark ( VALUE ptr )
{
2012-10-03 16:30:21 +04:00
gc_mark ( & rb_objspace , ptr ) ;
2010-05-28 15:13:42 +04:00
}
2014-02-08 11:03:43 +04:00
/* CAUTION: THIS FUNCTION ENABLE *ONLY BEFORE* SWEEPING.
* This function is only for GC_END_MARK timing .
*/
int
rb_objspace_marked_object_p ( VALUE obj )
{
2014-09-08 08:11:00 +04:00
return RVALUE_MARKED ( obj ) ? TRUE : FALSE ;
2014-02-08 11:03:43 +04:00
}
2014-09-08 08:11:00 +04:00
static inline void
gc_mark_set_parent ( rb_objspace_t * objspace , VALUE obj )
2013-11-19 13:48:47 +04:00
{
2014-06-30 10:14:37 +04:00
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
if ( RVALUE_OLD_P ( obj ) ) {
objspace - > rgengc . parent_object = obj ;
2013-11-19 13:48:47 +04:00
}
2014-09-08 08:11:00 +04:00
else {
objspace - > rgengc . parent_object = Qfalse ;
}
# endif
2013-11-19 13:48:47 +04:00
}
2016-07-28 22:13:26 +03:00
static void
gc_mark_imemo ( rb_objspace_t * objspace , VALUE obj )
{
switch ( imemo_type ( obj ) ) {
case imemo_env :
{
const rb_env_t * env = ( const rb_env_t * ) obj ;
2017-06-22 08:03:18 +03:00
GC_ASSERT ( VM_ENV_ESCAPED_P ( env - > ep ) ) ;
2016-07-28 22:13:26 +03:00
gc_mark_values ( objspace , ( long ) env - > env_size , env - > env ) ;
VM_ENV_FLAGS_SET ( env - > ep , VM_ENV_FLAG_WB_REQUIRED ) ;
gc_mark ( objspace , ( VALUE ) rb_vm_env_prev_env ( env ) ) ;
gc_mark ( objspace , ( VALUE ) env - > iseq ) ;
}
return ;
case imemo_cref :
gc_mark ( objspace , RANY ( obj ) - > as . imemo . cref . klass ) ;
gc_mark ( objspace , ( VALUE ) RANY ( obj ) - > as . imemo . cref . next ) ;
gc_mark ( objspace , RANY ( obj ) - > as . imemo . cref . refinements ) ;
return ;
case imemo_svar :
gc_mark ( objspace , RANY ( obj ) - > as . imemo . svar . cref_or_me ) ;
gc_mark ( objspace , RANY ( obj ) - > as . imemo . svar . lastline ) ;
gc_mark ( objspace , RANY ( obj ) - > as . imemo . svar . backref ) ;
gc_mark ( objspace , RANY ( obj ) - > as . imemo . svar . others ) ;
return ;
case imemo_throw_data :
gc_mark ( objspace , RANY ( obj ) - > as . imemo . throw_data . throw_obj ) ;
return ;
case imemo_ifunc :
gc_mark_maybe ( objspace , ( VALUE ) RANY ( obj ) - > as . imemo . ifunc . data ) ;
return ;
case imemo_memo :
gc_mark ( objspace , RANY ( obj ) - > as . imemo . memo . v1 ) ;
gc_mark ( objspace , RANY ( obj ) - > as . imemo . memo . v2 ) ;
gc_mark_maybe ( objspace , RANY ( obj ) - > as . imemo . memo . u3 . value ) ;
return ;
case imemo_ment :
mark_method_entry ( objspace , & RANY ( obj ) - > as . imemo . ment ) ;
return ;
case imemo_iseq :
rb_iseq_mark ( ( rb_iseq_t * ) obj ) ;
return ;
2018-05-09 10:11:59 +03:00
case imemo_tmpbuf :
2017-10-21 12:10:42 +03:00
{
2018-05-09 10:11:59 +03:00
const rb_imemo_tmpbuf_t * m = & RANY ( obj ) - > as . imemo . alloc ;
2017-10-21 12:10:42 +03:00
do {
rb_gc_mark_locations ( m - > ptr , m - > ptr + m - > cnt ) ;
} while ( ( m = m - > next ) ! = NULL ) ;
}
2017-10-21 11:40:28 +03:00
return ;
2017-10-27 19:44:57 +03:00
case imemo_ast :
rb_ast_mark ( & RANY ( obj ) - > as . imemo . ast ) ;
return ;
2017-11-04 13:56:50 +03:00
case imemo_parser_strterm :
2017-11-04 10:21:36 +03:00
rb_strterm_mark ( obj ) ;
return ;
2016-07-28 22:13:26 +03:00
# if VM_CHECK_MODE > 0
default :
VM_UNREACHABLE ( gc_mark_imemo ) ;
# endif
}
}
2010-05-28 15:13:42 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_mark_children ( rb_objspace_t * objspace , VALUE obj )
2010-05-28 15:13:42 +04:00
{
2014-09-08 08:11:00 +04:00
register RVALUE * any = RANY ( obj ) ;
gc_mark_set_parent ( objspace , obj ) ;
2010-05-28 15:13:42 +04:00
2014-09-08 08:11:00 +04:00
if ( FL_TEST ( obj , FL_EXIVAR ) ) {
rb_mark_generic_ivar ( obj ) ;
2010-05-28 15:13:42 +04:00
}
2012-08-05 14:39:37 +04:00
switch ( BUILTIN_TYPE ( obj ) ) {
2019-03-09 03:00:26 +03:00
case T_FLOAT :
case T_BIGNUM :
case T_SYMBOL :
/* Not immediates, but does not have references and singleton
* class */
return ;
2012-08-05 14:39:37 +04:00
case T_NIL :
case T_FIXNUM :
rb_bug ( " rb_gc_mark() called for broken object " ) ;
break ;
case T_NODE :
2017-11-04 17:32:48 +03:00
UNEXPECTED_NODE ( rb_gc_mark ) ;
2017-11-04 13:02:43 +03:00
break ;
2015-03-11 13:36:17 +03:00
case T_IMEMO :
2016-07-28 22:13:26 +03:00
gc_mark_imemo ( objspace , obj ) ;
return ;
1998-01-16 15:13:05 +03:00
}
2015-06-01 15:20:06 +03:00
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , any - > as . basic . klass ) ;
2008-07-27 09:59:32 +04:00
switch ( BUILTIN_TYPE ( obj ) ) {
1998-01-16 15:13:05 +03:00
case T_CLASS :
2012-08-05 14:39:37 +04:00
case T_MODULE :
2015-03-11 12:15:20 +03:00
mark_m_tbl ( objspace , RCLASS_M_TBL ( obj ) ) ;
2012-08-05 14:39:37 +04:00
if ( ! RCLASS_EXT ( obj ) ) break ;
2012-10-04 11:23:41 +04:00
mark_tbl ( objspace , RCLASS_IV_TBL ( obj ) ) ;
mark_const_tbl ( objspace , RCLASS_CONST_TBL ( obj ) ) ;
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , RCLASS_SUPER ( ( VALUE ) obj ) ) ;
break ;
2012-08-05 14:39:37 +04:00
2015-03-11 12:15:20 +03:00
case T_ICLASS :
if ( FL_TEST ( obj , RICLASS_IS_ORIGIN ) ) {
mark_m_tbl ( objspace , RCLASS_M_TBL ( obj ) ) ;
}
if ( ! RCLASS_EXT ( obj ) ) break ;
2015-08-16 11:25:29 +03:00
mark_m_tbl ( objspace , RCLASS_CALLABLE_M_TBL ( obj ) ) ;
2015-03-11 12:15:20 +03:00
gc_mark ( objspace , RCLASS_SUPER ( ( VALUE ) obj ) ) ;
break ;
1998-01-16 15:13:05 +03:00
case T_ARRAY :
2018-10-31 00:02:12 +03:00
if ( FL_TEST ( obj , ELTS_SHARED ) ) {
2018-10-31 00:53:56 +03:00
VALUE root = any - > as . array . as . heap . aux . shared ;
2018-10-31 00:54:13 +03:00
gc_mark ( objspace , root ) ;
2000-09-27 07:43:15 +04:00
}
2012-08-05 14:39:37 +04:00
else {
long i , len = RARRAY_LEN ( obj ) ;
2018-10-31 00:54:13 +03:00
const VALUE * ptr = RARRAY_CONST_PTR_TRANSIENT ( obj ) ;
2012-08-05 14:39:37 +04:00
for ( i = 0 ; i < len ; i + + ) {
2018-10-31 00:54:13 +03:00
gc_mark ( objspace , ptr [ i ] ) ;
1999-01-20 07:59:39 +03:00
}
2018-10-31 00:53:56 +03:00
if ( objspace - > mark_func_data = = NULL ) {
if ( ! FL_TEST_RAW ( obj , RARRAY_EMBED_FLAG ) & &
RARRAY_TRANSIENT_P ( obj ) ) {
rb_transient_heap_mark ( obj , ptr ) ;
}
}
2018-10-31 00:02:12 +03:00
}
1998-01-16 15:13:05 +03:00
break ;
2012-08-05 14:39:37 +04:00
case T_HASH :
2018-10-31 01:12:12 +03:00
mark_hash ( objspace , obj ) ;
2014-09-08 08:11:00 +04:00
break ;
2012-08-05 14:39:37 +04:00
case T_STRING :
2014-02-05 06:49:41 +04:00
if ( STR_SHARED_P ( obj ) ) {
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , any - > as . string . as . heap . aux . shared ) ;
1999-01-20 07:59:39 +03:00
}
1998-01-16 15:13:05 +03:00
break ;
2012-08-05 14:39:37 +04:00
case T_DATA :
2015-05-10 15:32:47 +03:00
{
void * const ptr = DATA_PTR ( obj ) ;
if ( ptr ) {
RUBY_DATA_FUNC mark_func = RTYPEDDATA_P ( obj ) ?
any - > as . typeddata . type - > function . dmark :
any - > as . data . dmark ;
if ( mark_func ) ( * mark_func ) ( ptr ) ;
}
2012-08-05 14:39:37 +04:00
}
2008-03-16 03:23:43 +03:00
break ;
2012-08-05 14:39:37 +04:00
case T_OBJECT :
{
2018-10-31 01:01:17 +03:00
const VALUE * const ptr = ROBJECT_IVPTR ( obj ) ;
if ( ptr ) {
uint32_t i , len = ROBJECT_NUMIV ( obj ) ;
for ( i = 0 ; i < len ; i + + ) {
gc_mark ( objspace , ptr [ i ] ) ;
}
if ( objspace - > mark_func_data = = NULL & &
ROBJ_TRANSIENT_P ( obj ) ) {
rb_transient_heap_mark ( obj , ptr ) ;
}
2012-08-05 14:39:37 +04:00
}
}
1998-01-16 15:13:05 +03:00
break ;
2012-08-05 14:39:37 +04:00
case T_FILE :
2014-09-08 08:11:00 +04:00
if ( any - > as . file . fptr ) {
gc_mark ( objspace , any - > as . file . fptr - > pathv ) ;
gc_mark ( objspace , any - > as . file . fptr - > tied_io_for_writing ) ;
gc_mark ( objspace , any - > as . file . fptr - > writeconv_asciicompat ) ;
gc_mark ( objspace , any - > as . file . fptr - > writeconv_pre_ecopts ) ;
gc_mark ( objspace , any - > as . file . fptr - > encs . ecopts ) ;
gc_mark ( objspace , any - > as . file . fptr - > write_lock ) ;
2012-08-05 14:39:37 +04:00
}
break ;
case T_REGEXP :
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , any - > as . regexp . src ) ;
break ;
2012-08-05 14:39:37 +04:00
case T_MATCH :
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , any - > as . match . regexp ) ;
if ( any - > as . match . str ) {
gc_mark ( objspace , any - > as . match . str ) ;
2000-09-27 07:43:15 +04:00
}
1998-01-16 15:13:05 +03:00
break ;
2012-08-05 14:39:37 +04:00
case T_RATIONAL :
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , any - > as . rational . num ) ;
gc_mark ( objspace , any - > as . rational . den ) ;
break ;
2012-08-05 14:39:37 +04:00
case T_COMPLEX :
2014-09-08 08:11:00 +04:00
gc_mark ( objspace , any - > as . complex . real ) ;
gc_mark ( objspace , any - > as . complex . imag ) ;
break ;
1998-01-16 15:13:05 +03:00
case T_STRUCT :
2012-08-05 14:39:37 +04:00
{
2018-10-31 01:03:42 +03:00
long i ;
const long len = RSTRUCT_LEN ( obj ) ;
const VALUE * const ptr = RSTRUCT_CONST_PTR ( obj ) ;
2012-08-05 14:39:37 +04:00
2018-10-31 01:03:42 +03:00
for ( i = 0 ; i < len ; i + + ) {
gc_mark ( objspace , ptr [ i ] ) ;
}
if ( objspace - > mark_func_data = = NULL & &
RSTRUCT_TRANSIENT_P ( obj ) ) {
rb_transient_heap_mark ( obj , ptr ) ;
}
2000-09-27 07:43:15 +04:00
}
1998-01-16 15:13:05 +03:00
break ;
default :
2013-08-19 16:00:51 +04:00
# if GC_DEBUG
2013-06-19 10:11:15 +04:00
rb_gcdebug_print_obj_condition ( ( VALUE ) obj ) ;
# endif
if ( BUILTIN_TYPE ( obj ) = = T_NONE ) rb_bug ( " rb_gc_mark(): %p is T_NONE " , ( void * ) obj ) ;
if ( BUILTIN_TYPE ( obj ) = = T_ZOMBIE ) rb_bug ( " rb_gc_mark(): %p is T_ZOMBIE " , ( void * ) obj ) ;
2012-08-05 14:39:37 +04:00
rb_bug ( " rb_gc_mark(): unknown data type 0x%x(%p) %s " ,
2018-01-02 09:41:40 +03:00
BUILTIN_TYPE ( obj ) , ( void * ) any ,
2014-09-08 08:11:00 +04:00
is_pointer_to_heap ( objspace , any ) ? " corrupted object " : " non object " ) ;
1998-01-16 15:13:05 +03:00
}
}
2014-09-08 08:11:00 +04:00
/**
* incremental : 0 - > not incremental ( do all )
* incremental : n - > mark at most ` n ' objects
*/
static inline int
gc_mark_stacked_objects ( rb_objspace_t * objspace , int incremental , size_t count )
2012-10-04 11:31:31 +04:00
{
mark_stack_t * mstack = & objspace - > mark_stack ;
2014-09-08 08:11:00 +04:00
VALUE obj ;
# if GC_ENABLE_INCREMENTAL_MARK
2015-12-09 08:49:27 +03:00
size_t marked_slots_at_the_beginning = objspace - > marked_slots ;
2014-11-13 23:16:59 +03:00
size_t popped_count = 0 ;
2014-09-08 08:11:00 +04:00
# endif
2012-10-04 11:31:31 +04:00
while ( pop_mark_stack ( mstack , & obj ) ) {
2014-09-08 08:11:00 +04:00
if ( obj = = Qundef ) continue ; /* skip */
if ( RGENGC_CHECK_MODE & & ! RVALUE_MARKED ( obj ) ) {
rb_bug ( " gc_mark_stacked_objects: %s is not marked. " , obj_info ( obj ) ) ;
2013-11-21 08:57:37 +04:00
}
2012-10-04 11:31:31 +04:00
gc_mark_children ( objspace , obj ) ;
2013-09-27 12:12:31 +04:00
2014-09-08 08:11:00 +04:00
# if GC_ENABLE_INCREMENTAL_MARK
if ( incremental ) {
if ( RGENGC_CHECK_MODE & & ! RVALUE_MARKING ( obj ) ) {
rb_bug ( " gc_mark_stacked_objects: incremental, but marking bit is 0 " ) ;
}
CLEAR_IN_BITMAP ( GET_HEAP_MARKING_BITS ( obj ) , obj ) ;
2014-11-13 23:16:59 +03:00
popped_count + + ;
2013-09-27 12:12:31 +04:00
2015-12-09 08:49:27 +03:00
if ( popped_count + ( objspace - > marked_slots - marked_slots_at_the_beginning ) > count ) {
2014-09-08 08:11:00 +04:00
break ;
}
}
else {
/* just ignore marking bits */
}
# endif
}
2013-09-27 12:12:31 +04:00
2015-03-19 10:19:52 +03:00
if ( RGENGC_CHECK_MODE > = 3 ) gc_verify_internal_consistency ( Qnil ) ;
2013-09-27 12:12:31 +04:00
2014-09-08 08:11:00 +04:00
if ( is_mark_stack_empty ( mstack ) ) {
shrink_stack_chunk_cache ( mstack ) ;
return TRUE ;
}
else {
return FALSE ;
}
2013-09-27 12:12:31 +04:00
}
2014-09-08 08:11:00 +04:00
static int
gc_mark_stacked_objects_incremental ( rb_objspace_t * objspace , size_t count )
2013-09-27 12:12:31 +04:00
{
2014-09-08 08:11:00 +04:00
return gc_mark_stacked_objects ( objspace , TRUE , count ) ;
2013-09-27 12:12:31 +04:00
}
2014-09-08 08:11:00 +04:00
static int
gc_mark_stacked_objects_all ( rb_objspace_t * objspace )
2013-09-27 12:12:31 +04:00
{
2014-09-08 08:11:00 +04:00
return gc_mark_stacked_objects ( objspace , FALSE , 0 ) ;
2013-09-27 12:12:31 +04:00
}
#if PRINT_ROOT_TICKS
#define MAX_TICKS 0x100
/* Per-category tick counts recorded by MARK_CHECKPOINT in gc_mark_roots. */
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];

/* atexit handler: dump the accumulated root-marking tick profile. */
static void
show_mark_ticks(void)
{
    int i;
    fprintf(stderr, "mark ticks result:\n");
    for (i = 0; i < MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        if (category) {
            fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
        }
        else {
            break;
        }
    }
}
#endif /* PRINT_ROOT_TICKS */
2010-05-28 15:13:42 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_mark_roots ( rb_objspace_t * objspace , const char * * categoryp )
1998-01-16 15:13:05 +03:00
{
2008-07-05 11:15:41 +04:00
struct gc_list * list ;
2017-10-29 16:50:24 +03:00
rb_execution_context_t * ec = GET_EC ( ) ;
rb_vm_t * vm = rb_ec_vm_ptr ( ec ) ;
2014-09-08 08:11:00 +04:00
# if PRINT_ROOT_TICKS
2013-09-27 12:12:31 +04:00
tick_t start_tick = tick ( ) ;
2013-10-15 14:22:33 +04:00
int tick_count = 0 ;
const char * prev_category = 0 ;
if ( mark_ticks_categories [ 0 ] = = 0 ) {
2013-09-27 12:12:31 +04:00
atexit ( show_mark_ticks ) ;
}
# endif
2015-04-23 23:40:19 +03:00
if ( categoryp ) * categoryp = " xxx " ;
# if USE_RGENGC
objspace - > rgengc . parent_object = Qfalse ;
# endif
2014-09-08 08:11:00 +04:00
# if PRINT_ROOT_TICKS
2013-10-15 14:22:33 +04:00
# define MARK_CHECKPOINT_PRINT_TICK(category) do { \
if ( prev_category ) { \
tick_t t = tick ( ) ; \
mark_ticks [ tick_count ] = t - start_tick ; \
mark_ticks_categories [ tick_count ] = prev_category ; \
tick_count + + ; \
} \
prev_category = category ; \
start_tick = tick ( ) ; \
} while ( 0 )
2014-09-08 08:11:00 +04:00
# else /* PRITNT_ROOT_TICKS */
2013-10-15 14:22:33 +04:00
# define MARK_CHECKPOINT_PRINT_TICK(category)
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif
1998-01-16 15:13:05 +03:00
2013-10-15 14:22:33 +04:00
# define MARK_CHECKPOINT(category) do { \
if ( categoryp ) * categoryp = category ; \
MARK_CHECKPOINT_PRINT_TICK ( category ) ; \
} while ( 0 )
MARK_CHECKPOINT ( " vm " ) ;
2007-02-05 15:21:01 +03:00
SET_STACK_END ;
2017-10-29 16:50:24 +03:00
rb_vm_mark ( vm ) ;
if ( vm - > self ) gc_mark ( objspace , vm - > self ) ;
2006-01-10 13:50:17 +03:00
2013-10-16 02:33:36 +04:00
MARK_CHECKPOINT ( " finalizers " ) ;
2013-10-11 22:03:43 +04:00
mark_tbl ( objspace , finalizer_table ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-10-16 02:33:36 +04:00
MARK_CHECKPOINT ( " machine_context " ) ;
2017-10-29 16:50:24 +03:00
mark_current_machine_context ( objspace , ec ) ;
2006-12-31 18:02:22 +03:00
1998-01-16 15:13:05 +03:00
/* mark protected global variables */
2013-10-16 02:33:36 +04:00
MARK_CHECKPOINT ( " global_list " ) ;
2014-07-10 07:24:17 +04:00
for ( list = global_list ; list ; list = list - > next ) {
2003-04-21 12:17:18 +04:00
rb_gc_mark_maybe ( * list - > varptr ) ;
1998-01-16 15:13:05 +03:00
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-10-16 02:33:36 +04:00
MARK_CHECKPOINT ( " end_proc " ) ;
1999-11-26 12:07:26 +03:00
rb_mark_end_proc ( ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-10-16 02:33:36 +04:00
MARK_CHECKPOINT ( " global_tbl " ) ;
1999-01-20 07:59:39 +03:00
rb_gc_mark_global_tbl ( ) ;
1998-01-16 15:13:05 +03:00
2015-05-27 08:55:00 +03:00
if ( stress_to_class ) rb_gc_mark ( stress_to_class ) ;
2013-10-15 14:22:33 +04:00
MARK_CHECKPOINT ( " finish " ) ;
2013-06-20 00:43:33 +04:00
# undef MARK_CHECKPOINT
2013-10-15 14:22:33 +04:00
}
#if RGENGC_CHECK_MODE >= 4

/* Root signatures: a root category name (const char *) tagged with the low
 * bit so it can be distinguished from object VALUEs in a reflist. */
#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj)   ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj)  ((const char *)((VALUE)(obj) >> 1))

/* Growable list of referrers (objects or root signatures). */
struct reflist {
    VALUE *list;
    int pos;  /* number of entries in use */
    int size; /* allocated capacity */
};
/* Allocate a reflist seeded with a single referrer `obj`. */
static struct reflist *
reflist_create(VALUE obj)
{
    struct reflist *refs = xmalloc(sizeof(struct reflist));
    refs->size = 1;
    refs->list = ALLOC_N(VALUE, refs->size);
    refs->list[0] = obj;
    refs->pos = 1;
    return refs;
}
2013-06-22 10:43:30 +04:00
static void
2013-11-21 08:57:37 +04:00
reflist_destruct ( struct reflist * refs )
2013-06-22 10:43:30 +04:00
{
2013-11-21 08:57:37 +04:00
xfree ( refs - > list ) ;
xfree ( refs ) ;
}
2013-06-22 10:43:30 +04:00
2013-12-10 10:58:48 +04:00
static void
2013-11-21 08:57:37 +04:00
reflist_add ( struct reflist * refs , VALUE obj )
{
if ( refs - > pos = = refs - > size ) {
refs - > size * = 2 ;
SIZED_REALLOC_N ( refs - > list , VALUE , refs - > size , refs - > size / 2 ) ;
2013-06-22 10:43:30 +04:00
}
2013-12-10 09:46:48 +04:00
2013-11-21 08:57:37 +04:00
refs - > list [ refs - > pos + + ] = obj ;
2013-06-22 10:43:30 +04:00
}
2013-11-21 08:57:37 +04:00
static void
reflist_dump ( struct reflist * refs )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
2013-11-21 08:57:37 +04:00
int i ;
for ( i = 0 ; i < refs - > pos ; i + + ) {
VALUE obj = refs - > list [ i ] ;
if ( IS_ROOTSIG ( obj ) ) { /* root */
fprintf ( stderr , " <root@%s> " , GET_ROOTSIG ( obj ) ) ;
}
else {
2014-09-08 08:11:00 +04:00
fprintf ( stderr , " <%s> " , obj_info ( obj ) ) ;
2013-11-21 08:57:37 +04:00
}
if ( i + 1 < refs - > pos ) fprintf ( stderr , " , " ) ;
2013-06-19 18:59:35 +04:00
}
2013-06-22 10:43:30 +04:00
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-11-21 08:57:37 +04:00
static int
2018-01-18 14:44:10 +03:00
reflist_referred_from_machine_context ( struct reflist * refs )
2013-06-19 18:59:35 +04:00
{
2013-11-21 08:57:37 +04:00
int i ;
for ( i = 0 ; i < refs - > pos ; i + + ) {
VALUE obj = refs - > list [ i ] ;
if ( IS_ROOTSIG ( obj ) & & strcmp ( GET_ROOTSIG ( obj ) , " machine_context " ) = = 0 ) return 1 ;
}
return 0 ;
}
2013-06-19 18:59:35 +04:00
2013-11-21 08:57:37 +04:00
struct allrefs {
rb_objspace_t * objspace ;
/* a -> obj1
* b - > obj1
* c - > obj1
* c - > obj2
* d - > obj3
* # = > { obj1 = > [ a , b , c ] , obj2 = > [ c , d ] }
*/
struct st_table * references ;
const char * category ;
VALUE root_obj ;
2014-09-08 08:11:00 +04:00
mark_stack_t mark_stack ;
2013-11-21 08:57:37 +04:00
} ;
2013-06-19 18:59:35 +04:00
2013-12-10 09:46:48 +04:00
static int
2013-11-21 08:57:37 +04:00
allrefs_add ( struct allrefs * data , VALUE obj )
{
struct reflist * refs ;
2013-06-19 18:59:35 +04:00
2013-11-21 08:57:37 +04:00
if ( st_lookup ( data - > references , obj , ( st_data_t * ) & refs ) ) {
2013-12-10 10:47:15 +04:00
reflist_add ( refs , data - > root_obj ) ;
return 0 ;
2013-11-21 08:57:37 +04:00
}
else {
refs = reflist_create ( data - > root_obj ) ;
st_insert ( data - > references , obj , ( st_data_t ) refs ) ;
2013-12-10 09:46:48 +04:00
return 1 ;
2013-06-19 18:59:35 +04:00
}
}
static void
2013-11-21 08:57:37 +04:00
allrefs_i ( VALUE obj , void * ptr )
2013-06-19 18:59:35 +04:00
{
2013-11-21 08:57:37 +04:00
struct allrefs * data = ( struct allrefs * ) ptr ;
2013-12-10 10:47:15 +04:00
if ( allrefs_add ( data , obj ) ) {
2014-09-08 08:11:00 +04:00
push_mark_stack ( & data - > mark_stack , obj ) ;
2013-12-10 10:58:48 +04:00
}
2013-12-10 10:47:15 +04:00
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2013-06-19 18:59:35 +04:00
static void
2013-11-21 08:57:37 +04:00
allrefs_roots_i ( VALUE obj , void * ptr )
2013-06-19 18:59:35 +04:00
{
2013-11-21 08:57:37 +04:00
struct allrefs * data = ( struct allrefs * ) ptr ;
if ( strlen ( data - > category ) = = 0 ) rb_bug ( " !!! " ) ;
data - > root_obj = MAKE_ROOTSIG ( data - > category ) ;
2013-12-10 10:47:15 +04:00
if ( allrefs_add ( data , obj ) ) {
2014-09-08 08:11:00 +04:00
push_mark_stack ( & data - > mark_stack , obj ) ;
2013-12-10 10:58:48 +04:00
}
2013-12-10 10:47:15 +04:00
}
2013-06-22 10:43:30 +04:00
2013-11-21 08:57:37 +04:00
/* Build and return the complete reverse-reference table for the object
 * space: obj => reflist of everything (roots and objects) that refers to it.
 * GC is disabled for the duration of the traversal; caller must free the
 * result with objspace_allrefs_destruct(). */
static st_table *
objspace_allrefs(rb_objspace_t *objspace)
{
    struct allrefs data;
    struct mark_func_data_struct mfd;
    VALUE obj;
    int prev_dont_gc = dont_gc;
    dont_gc = TRUE;

    data.objspace = objspace;
    data.references = st_init_numtable();
    init_mark_stack(&data.mark_stack);

    mfd.mark_func = allrefs_roots_i;
    mfd.data = &data;

    /* traverse root objects */
    PUSH_MARK_FUNC_DATA(&mfd);
    objspace->mark_func_data = &mfd;
    gc_mark_roots(objspace, &data.category);
    POP_MARK_FUNC_DATA();

    /* traverse rest objects reachable from root objects */
    while (pop_mark_stack(&data.mark_stack, &obj)) {
        rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
    }
    free_stack_chunks(&data.mark_stack);

    dont_gc = prev_dont_gc;
    return data.references;
}
static int
2015-03-29 15:08:43 +03:00
objspace_allrefs_destruct_i ( st_data_t key , st_data_t value , void * ptr )
2013-11-21 08:57:37 +04:00
{
struct reflist * refs = ( struct reflist * ) value ;
reflist_destruct ( refs ) ;
return ST_CONTINUE ;
2013-06-22 10:43:30 +04:00
}
static void
2013-12-10 07:25:28 +04:00
objspace_allrefs_destruct ( struct st_table * refs )
2013-06-22 10:43:30 +04:00
{
2015-03-29 15:08:43 +03:00
st_foreach ( refs , objspace_allrefs_destruct_i , 0 ) ;
2013-11-21 08:57:37 +04:00
st_free_table ( refs ) ;
}
2013-06-22 10:43:30 +04:00
2014-09-08 08:11:00 +04:00
# if RGENGC_CHECK_MODE >= 5
2013-11-21 08:57:37 +04:00
/* st_foreach callback: print one object and everything that refers to it. */
static int
allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = (VALUE)k;
    struct reflist *refs = (struct reflist *)v;

    fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
    reflist_dump(refs);
    fprintf(stderr, "\n");
    return ST_CONTINUE;
}
2013-06-22 10:43:30 +04:00
2013-11-21 08:57:37 +04:00
/* Dump the whole reverse-reference table to stderr (debug aid, only built
 * when RGENGC_CHECK_MODE >= 5). */
static void
allrefs_dump(rb_objspace_t *objspace)
{
    fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
    st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
}
# endif
2013-06-22 10:43:30 +04:00
2013-11-21 08:57:37 +04:00
/* Post-mark verification callback: every object in the allrefs table should
 * have been marked.  Unmarked objects referred only from the machine stack
 * are tolerated (conservative marking false positives); anything else bumps
 * objspace->rgengc.error_count. */
static int
gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
{
    VALUE obj = k;
    struct reflist *refs = (struct reflist *)v;
    rb_objspace_t *objspace = (rb_objspace_t *)ptr;

    /* object should be marked or oldgen */
    if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        reflist_dump(refs);

        if (reflist_referred_from_machine_context(refs)) {
            fprintf(stderr, " (marked from machine stack).\n");
            /* marked from machine context can be false positive */
        }
        else {
            objspace->rgengc.error_count++;
            fprintf(stderr, "\n");
        }
    }
    return ST_CONTINUE;
}
2013-06-19 18:59:35 +04:00
2013-11-21 08:57:37 +04:00
/* Run CHECKER_FUNC over a freshly built reverse-reference table and abort
 * with CHECKER_NAME if it reported errors.  GC is disabled while the table
 * exists; malloc counters are saved/restored so the check itself does not
 * perturb GC heuristics. */
static void
gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char *checker_name)
{
    size_t saved_malloc_increase = objspace->malloc_params.increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
#endif
    VALUE already_disabled = rb_gc_disable();

    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);

    if (checker_func) {
        st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
    }

    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);
#endif
        if (checker_name) rb_bug("%s: GC has problem.", checker_name);
    }

    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;

    if (already_disabled == Qfalse) rb_gc_enable();
    objspace->malloc_params.increase = saved_malloc_increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
#endif
}
2014-09-08 08:11:00 +04:00
# endif /* RGENGC_CHECK_MODE >= 4 */
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-04-10 14:01:16 +04:00
struct verify_internal_consistency_struct {
rb_objspace_t * objspace ;
int err_count ;
2014-06-03 11:50:23 +04:00
size_t live_object_count ;
2014-06-30 10:14:37 +04:00
size_t zombie_object_count ;
# if USE_RGENGC
VALUE parent ;
2014-06-03 11:50:23 +04:00
size_t old_object_count ;
2014-09-08 08:11:00 +04:00
size_t remembered_shady_count ;
2014-06-30 10:14:37 +04:00
# endif
2014-04-10 14:01:16 +04:00
} ;
2014-06-30 10:14:37 +04:00
# if USE_RGENGC
2014-04-10 14:01:16 +04:00
static void
2014-09-08 08:11:00 +04:00
check_generation_i ( const VALUE child , void * ptr )
2014-04-10 14:01:16 +04:00
{
struct verify_internal_consistency_struct * data = ( struct verify_internal_consistency_struct * ) ptr ;
2014-09-08 08:11:00 +04:00
const VALUE parent = data - > parent ;
2014-04-10 14:01:16 +04:00
2017-06-22 08:03:18 +03:00
if ( RGENGC_CHECK_MODE ) GC_ASSERT ( RVALUE_OLD_P ( parent ) ) ;
2014-04-10 14:01:16 +04:00
if ( ! RVALUE_OLD_P ( child ) ) {
2014-09-08 08:11:00 +04:00
if ( ! RVALUE_REMEMBERED ( parent ) & &
! RVALUE_REMEMBERED ( child ) & &
2015-03-18 21:02:13 +03:00
! RVALUE_UNCOLLECTIBLE ( child ) ) {
2014-09-08 08:11:00 +04:00
fprintf ( stderr , " verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s \n " , obj_info ( parent ) , obj_info ( child ) ) ;
2014-04-10 14:01:16 +04:00
data - > err_count + + ;
}
}
}
2014-09-08 08:11:00 +04:00
static void
check_color_i ( const VALUE child , void * ptr )
{
struct verify_internal_consistency_struct * data = ( struct verify_internal_consistency_struct * ) ptr ;
const VALUE parent = data - > parent ;
if ( ! RVALUE_WB_UNPROTECTED ( parent ) & & RVALUE_WHITE_P ( child ) ) {
fprintf ( stderr , " verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s \n " ,
obj_info ( parent ) , obj_info ( child ) ) ;
data - > err_count + + ;
}
}
2014-06-30 10:14:37 +04:00
# endif
2014-04-10 14:01:16 +04:00
2014-09-08 08:11:00 +04:00
/* Run the generic per-object sanity check on each reachable child. */
static void
check_children_i(const VALUE child, void *ptr)
{
    check_rvalue_consistency(child);
}
2014-04-10 14:01:16 +04:00
/* Per-page walker for GC.verify_internal_consistency: count live/old/zombie
 * objects and verify generational and tri-color invariants on every slot in
 * [page_start, page_end).  Poisoned (ASAN) slots are temporarily unpoisoned
 * and re-poisoned around the inspection. */
static int
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    VALUE obj;
    rb_objspace_t *objspace = data->objspace;

    for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
        void *poisoned = poisoned_object_p(obj);
        unpoison_object(obj, false);

        if (is_live_object(objspace, obj)) {
            /* count objects */
            data->live_object_count++;

            rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
#if USE_RGENGC
            /* check health of children */
            data->parent = obj;
            if (RVALUE_OLD_P(obj)) data->old_object_count++;
            if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;

            if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
                /* reachable objects from an oldgen object should be old or (young with remember) */
                data->parent = obj;
                rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
            }

            if (is_incremental_marking(objspace)) {
                if (RVALUE_BLACK_P(obj)) {
                    /* reachable objects from black objects should be black or grey objects */
                    data->parent = obj;
                    rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
                }
            }
#endif
        }
        else {
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                GC_ASSERT(RBASIC(obj)->flags == T_ZOMBIE);
                data->zombie_object_count++;
            }
        }
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            poison_object(obj);
        }
    }

    return 0;
}
2014-09-08 08:11:00 +04:00
/* Verify one heap page's bookkeeping against its slot contents: remembered
 * (marking) bits, uncollectible-shady bits, free slot count and final slot
 * count.  Aborts via rb_bug() on mismatch.  Returns the number of remembered
 * old objects found on the page (0 when !USE_RGENGC). */
static int
gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
#if USE_RGENGC
    int i;
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;

    for (i = 0; i < page->total_slots; i++) {
        VALUE val = (VALUE)&page->start[i];
        void *poisoned = poisoned_object_p(val);
        unpoison_object(val, false);

        if (RBASIC(val) == 0) free_objects++;
        if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
        if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
            has_remembered_shady = TRUE;
        }
        if (RVALUE_PAGE_MARKING(page, val)) {
            has_remembered_old = TRUE;
            remembered_old_objects++;
        }

        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
            poison_object(val);
        }
    }

    if (!is_incremental_marking(objspace) &&
        page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {

        for (i = 0; i < page->total_slots; i++) {
            VALUE val = (VALUE)&page->start[i];
            if (RVALUE_PAGE_MARKING(page, val)) {
                fprintf(stderr, "marking -> %s\n", obj_info(val));
            }
        }
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
    }

    if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");
    }

    if (0) {
        /* free_slots may not equal to free_objects */
        if (page->free_slots != free_objects) {
            rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
        }
    }
    if (page->final_slots != zombie_objects) {
        rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
    }

    return remembered_old_objects;
#else
    return 0;
#endif
}
static int
2018-05-16 23:39:30 +03:00
gc_verify_heap_pages_ ( rb_objspace_t * objspace , struct list_head * head )
2014-09-08 08:11:00 +04:00
{
2018-01-18 14:44:10 +03:00
int remembered_old_objects = 0 ;
2018-05-16 23:39:30 +03:00
struct heap_page * page = 0 ;
2014-09-08 08:11:00 +04:00
2018-05-16 23:39:30 +03:00
list_for_each ( head , page , page_node ) {
2019-04-02 22:13:07 +03:00
unpoison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) , false ) ;
RVALUE * p = page - > freelist ;
while ( p ) {
RVALUE * prev = p ;
unpoison_object ( p , false ) ;
if ( BUILTIN_TYPE ( p ) ! = T_NONE ) {
fprintf ( stderr , " freelist slot expected to be T_NONE but was: %s \n " , obj_info ( ( VALUE ) p ) ) ;
}
p = p - > as . free . next ;
poison_object ( prev ) ;
}
poison_memory_region ( & page - > freelist , sizeof ( RVALUE * ) ) ;
2016-03-31 10:02:40 +03:00
if ( page - > flags . has_remembered_objects = = FALSE ) {
2018-01-18 14:44:10 +03:00
remembered_old_objects + = gc_verify_heap_page ( objspace , page , Qfalse ) ;
2016-03-31 10:02:40 +03:00
}
2014-09-08 08:11:00 +04:00
}
2018-01-18 14:44:10 +03:00
return remembered_old_objects ;
2014-09-08 08:11:00 +04:00
}
2016-03-31 10:02:40 +03:00
/* Verify both heaps (eden and tomb); returns the combined count of
 * remembered old objects found. */
static int
gc_verify_heap_pages(rb_objspace_t *objspace)
{
    int remembered_old_objects = 0;
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
    return remembered_old_objects;
}
2014-04-10 14:01:16 +04:00
/*
* call - seq :
* GC . verify_internal_consistency - > nil
*
* Verify internal consistency .
*
* This method is implementation specific .
* Now this method checks generational consistency
* if RGenGC is supported .
*/
static VALUE
2015-03-19 10:19:52 +03:00
gc_verify_internal_consistency ( VALUE dummy )
2014-04-10 14:01:16 +04:00
{
2014-06-03 11:50:23 +04:00
rb_objspace_t * objspace = & rb_objspace ;
2014-06-30 10:14:37 +04:00
struct verify_internal_consistency_struct data = { 0 } ;
2014-09-08 08:11:00 +04:00
struct each_obj_args eo_args ;
2014-06-03 11:50:23 +04:00
data . objspace = objspace ;
2014-09-08 08:11:00 +04:00
gc_report ( 5 , objspace , " gc_verify_internal_consistency: start \n " ) ;
2014-06-03 11:50:23 +04:00
2014-09-08 08:11:00 +04:00
/* check relations */
eo_args . callback = verify_internal_consistency_i ;
eo_args . data = ( void * ) & data ;
objspace_each_objects ( ( VALUE ) & eo_args ) ;
2014-06-30 10:14:37 +04:00
2014-04-10 14:01:16 +04:00
if ( data . err_count ! = 0 ) {
2014-09-08 08:11:00 +04:00
# if RGENGC_CHECK_MODE >= 5
2014-04-10 14:01:16 +04:00
objspace - > rgengc . error_count = data . err_count ;
gc_marks_check ( objspace , NULL , NULL ) ;
allrefs_dump ( objspace ) ;
# endif
2014-06-03 11:50:23 +04:00
rb_bug ( " gc_verify_internal_consistency: found internal inconsistency. " ) ;
2014-04-10 14:01:16 +04:00
}
2014-06-03 11:50:23 +04:00
2014-09-08 08:11:00 +04:00
/* check heap_page status */
gc_verify_heap_pages ( objspace ) ;
/* check counters */
2014-06-03 11:50:23 +04:00
if ( ! is_lazy_sweeping ( heap_eden ) & & ! finalizing ) {
2014-09-10 05:42:09 +04:00
if ( objspace_live_slots ( objspace ) ! = data . live_object_count ) {
2014-09-09 14:01:18 +04:00
fprintf ( stderr , " heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d \n " ,
( int ) heap_pages_final_slots , ( int ) objspace - > profile . total_freed_objects ) ;
2017-09-04 15:15:06 +03:00
rb_bug ( " inconsistent live slot number: expect % " PRIuSIZE " , but % " PRIuSIZE " . " , objspace_live_slots ( objspace ) , data . live_object_count ) ;
2014-06-03 11:50:23 +04:00
}
}
2014-06-30 10:14:37 +04:00
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
if ( ! is_marking ( objspace ) ) {
2014-09-10 06:35:17 +04:00
if ( objspace - > rgengc . old_objects ! = data . old_object_count ) {
2017-09-04 15:15:06 +03:00
rb_bug ( " inconsistent old slot number: expect % " PRIuSIZE " , but % " PRIuSIZE " . " , objspace - > rgengc . old_objects , data . old_object_count ) ;
2014-09-08 08:11:00 +04:00
}
2015-03-18 21:02:13 +03:00
if ( objspace - > rgengc . uncollectible_wb_unprotected_objects ! = data . remembered_shady_count ) {
2017-09-04 15:15:06 +03:00
rb_bug ( " inconsistent old slot number: expect % " PRIuSIZE " , but % " PRIuSIZE " . " , objspace - > rgengc . uncollectible_wb_unprotected_objects , data . remembered_shady_count ) ;
2014-09-08 08:11:00 +04:00
}
2014-06-03 11:50:23 +04:00
}
# endif
2014-06-04 17:33:20 +04:00
if ( ! finalizing ) {
size_t list_count = 0 ;
{
VALUE z = heap_pages_deferred_final ;
while ( z ) {
list_count + + ;
z = RZOMBIE ( z ) - > next ;
}
}
if ( heap_pages_final_slots ! = data . zombie_object_count | |
heap_pages_final_slots ! = list_count ) {
rb_bug ( " inconsistent finalizing object count: \n "
" expect % " PRIuSIZE " \n "
" but % " PRIuSIZE " zombies \n "
" heap_pages_deferred_final list has % " PRIuSIZE " items. " ,
heap_pages_final_slots ,
data . zombie_object_count ,
list_count ) ;
}
}
2014-09-08 08:11:00 +04:00
gc_report ( 5 , objspace , " gc_verify_internal_consistency: OK \n " ) ;
2014-04-10 14:01:16 +04:00
return Qnil ;
}
2015-03-19 10:19:52 +03:00
/* Public C-API wrapper around the Ruby-level consistency check. */
void
rb_gc_verify_internal_consistency(void)
{
    gc_verify_internal_consistency(Qnil);
}
2018-10-31 00:53:56 +03:00
/* Ruby-method shim: run the transient heap's own verifier. */
static VALUE
gc_verify_transient_heap_internal_consistency(VALUE dmy)
{
    rb_transient_heap_verify();
    return Qnil;
}
2014-09-08 08:11:00 +04:00
/* marks */
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_marks_start ( rb_objspace_t * objspace , int full_mark )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
2014-09-08 08:11:00 +04:00
/* start marking */
gc_report ( 1 , objspace , " gc_marks_start: (%s) \n " , full_mark ? " full " : " minor " ) ;
2016-03-04 12:53:03 +03:00
gc_mode_transition ( objspace , gc_mode_marking ) ;
2013-05-24 14:21:04 +04:00
2013-06-21 03:10:34 +04:00
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
if ( full_mark ) {
2014-11-13 23:16:59 +03:00
# if GC_ENABLE_INCREMENTAL_MARK
2016-01-09 01:15:40 +03:00
objspace - > rincgc . step_slots = ( objspace - > marked_slots * 2 ) / ( ( objspace - > rincgc . pooled_slots / HEAP_PAGE_OBJ_LIMIT ) + 1 ) ;
2014-11-13 23:16:59 +03:00
if ( 0 ) fprintf ( stderr , " objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n " ,
( int ) objspace - > marked_slots , ( int ) objspace - > rincgc . pooled_slots , ( int ) objspace - > rincgc . step_slots ) ;
# endif
2014-09-09 06:45:21 +04:00
objspace - > flags . during_minor_gc = FALSE ;
2014-09-08 08:11:00 +04:00
objspace - > profile . major_gc_count + + ;
2015-03-18 21:02:13 +03:00
objspace - > rgengc . uncollectible_wb_unprotected_objects = 0 ;
2014-09-10 06:35:17 +04:00
objspace - > rgengc . old_objects = 0 ;
2014-09-08 08:11:00 +04:00
objspace - > rgengc . last_major_gc = objspace - > profile . count ;
2014-11-13 23:16:59 +03:00
objspace - > marked_slots = 0 ;
2014-09-08 08:11:00 +04:00
rgengc_mark_and_rememberset_clear ( objspace , heap_eden ) ;
}
else {
2014-09-09 06:45:21 +04:00
objspace - > flags . during_minor_gc = TRUE ;
2014-09-09 14:55:18 +04:00
objspace - > marked_slots =
2015-03-18 21:02:13 +03:00
objspace - > rgengc . old_objects + objspace - > rgengc . uncollectible_wb_unprotected_objects ; /* uncollectible objects are marked already */
2014-09-08 08:11:00 +04:00
objspace - > profile . minor_gc_count + + ;
rgengc_rememberset_mark ( objspace , heap_eden ) ;
}
# endif
2014-11-13 23:16:59 +03:00
2014-09-08 08:11:00 +04:00
gc_mark_roots ( objspace , NULL ) ;
gc_report ( 1 , objspace , " gc_marks_start: (%s) end, stack in %d \n " , full_mark ? " full " : " minor " , ( int ) mark_stack_size ( & objspace - > mark_stack ) ) ;
}
# if GC_ENABLE_INCREMENTAL_MARK
/*
 * Re-scan every object that is both marked and WB-unprotected ("shady")
 * and push its children, then drain the whole mark stack.
 *
 * WB-unprotected objects have no write barrier, so their references may
 * have changed since they were first marked; at the end of incremental
 * marking their children must be traversed again to stay conservative.
 */
static void
gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
{
    struct heap_page *page = 0;

    list_for_each(&heap_eden->pages, page, page_node) {
        bits_t *mark_bits = page->mark_bits;
        bits_t *wbun_bits = page->wb_unprotected_bits;
        /* back up to the RVALUE corresponding to bit 0 of the bitmaps */
        RVALUE *page_origin = page->start - NUM_IN_PAGE(page->start);
        size_t word;

        for (word = 0; word < HEAP_PAGE_BITMAP_LIMIT; word++) {
            /* only slots that are marked AND wb-unprotected are interesting */
            bits_t bitset = mark_bits[word] & wbun_bits[word];
            RVALUE *slot;

            if (bitset == 0) continue;

            slot = page_origin + word * BITS_BITLENGTH;
            for (; bitset; bitset >>= 1, slot++) {
                if (bitset & 1) {
                    gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)slot));
                    GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)slot));
                    GC_ASSERT(RVALUE_MARKED((VALUE)slot));
                    gc_mark_children(objspace, (VALUE)slot);
                }
            }
        }
    }

    gc_mark_stacked_objects_all(objspace);
}
/*
 * Detach the first pooled page, if any, and prepend it to the heap's
 * free-page list.  Returns the moved page, or NULL when no pooled
 * pages remain.
 */
static struct heap_page *
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    struct heap_page *moved = heap->pooled_pages;

    if (moved == NULL) return NULL;

    heap->pooled_pages = moved->free_next;
    moved->free_next = heap->free_pages;
    heap->free_pages = moved;

    return moved;
}
2014-11-14 04:44:57 +03:00
# endif
2014-09-08 08:11:00 +04:00
/*
 * Try to finish the marking phase.
 *
 * Returns TRUE when marking is complete (sweeping may start), FALSE when
 * incremental marking must continue (pooled pages still exist, or
 * re-scanning the roots pushed new objects onto the mark stack).
 *
 * On completion this also decides whether the NEXT collection should be a
 * major (full) GC, sizes the heap increment, and fires the END_MARK event.
 */
static int
gc_marks_finish(rb_objspace_t *objspace)
{
#if GC_ENABLE_INCREMENTAL_MARK
    /* finish incremental GC */
    if (is_incremental_marking(objspace)) {
        if (heap_eden->pooled_pages) {
            heap_move_pooled_pages_to_free_pages(heap_eden);
            gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
            return FALSE; /* continue marking phase */
        }

        if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
            rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
        }

        /* re-scan roots: mutator may have created new references while
         * marking ran incrementally */
        gc_mark_roots(objspace, 0);

        if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
            gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
            return FALSE;
        }

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
        }
#endif

        objspace->flags.during_incremental_marking = FALSE;
        /* check children of all marked wb-unprotected objects */
        gc_marks_wb_unprotected_objects(objspace);
    }
#endif /* GC_ENABLE_INCREMENTAL_MARK */

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(Qnil);
#endif

#if USE_RGENGC
    if (is_full_marking(objspace)) {
        /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
        const double r = gc_params.oldobject_limit_factor;
        objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
        objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
    }
#endif

#if RGENGC_CHECK_MODE >= 4
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
#endif

    {
        /* decide full GC is needed or not */
        rb_heap_t *heap = heap_eden;
        size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots;
        size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
        size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
        size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
        int full_marking = is_full_marking(objspace);

        GC_ASSERT(heap->total_slots >= objspace->marked_slots);

        /* setup free-able page counts */
        if (max_free_slots < gc_params.heap_init_slots) max_free_slots = gc_params.heap_init_slots;

        if (sweep_slots > max_free_slots) {
            heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
        }
        else {
            heap_pages_freeable_pages = 0;
        }

        /* check free_min */
        if (min_free_slots < gc_params.heap_free_slots) min_free_slots = gc_params.heap_free_slots;

#if USE_RGENGC
        if (sweep_slots < min_free_slots) {
            if (!full_marking) {
                if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
                    full_marking = TRUE;
                    /* do not update last_major_gc, because full marking is not done. */
                    goto increment;
                }
                else {
                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
                    objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
                }
            }
            else {
              increment:
                gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
                heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
                heap_increment(objspace, heap);
            }
        }

        /* NOTE: full_marking may have been forced TRUE just above (the
         * `goto increment' path), so this is not redundant with the
         * is_full_marking() block before the brace scope. */
        if (full_marking) {
            /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
            const double r = gc_params.oldobject_limit_factor;
            objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
            objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
        }

        if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
        }
        if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
        }
        if (RGENGC_FORCE_MAJOR_GC) {
            objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
        }

        gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
                  (int)objspace->marked_slots, (int)objspace->rgengc.old_objects, (int)heap->total_slots, (int)sweep_slots, (int)heap_allocatable_pages,
                  objspace->rgengc.need_major_gc ? "major" : "minor");
#else /* USE_RGENGC */
        if (sweep_slots < min_free_slots) {
            gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
            /* FIX: this branch previously referenced the undeclared
             * identifiers `sweep_slot'/`total_slot', which broke the
             * build when USE_RGENGC == 0. */
            heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
            heap_increment(objspace, heap);
        }
#endif
    }

    rb_transient_heap_finish_marking();

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);

    return TRUE;
}
/*
 * Advance incremental marking by popping at most `slots' objects from
 * the mark stack.  If the stack drains and gc_marks_finish() agrees
 * that marking is over, move straight into the sweep phase.
 */
static void
gc_marks_step(rb_objspace_t *objspace, int slots)
{
#if GC_ENABLE_INCREMENTAL_MARK
    GC_ASSERT(is_marking(objspace));

    if (gc_mark_stacked_objects_incremental(objspace, slots) &&
        gc_marks_finish(objspace)) {
        /* marking finished */
        gc_sweep(objspace);
    }

    if (0) fprintf(stderr, "objspace->marked_slots: %d\n", (int)objspace->marked_slots);
#endif
}
2014-09-08 08:11:00 +04:00
/*
 * Finish marking non-incrementally: drain the entire mark stack and
 * retry until gc_marks_finish() succeeds, then start sweeping.
 */
static void
gc_marks_rest(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_marks_rest\n");

#if GC_ENABLE_INCREMENTAL_MARK
    heap_eden->pooled_pages = NULL;
#endif

    if (is_incremental_marking(objspace)) {
        for (;;) {
            /* drain the mark stack completely */
            while (!gc_mark_stacked_objects_incremental(objspace, INT_MAX))
                ;
            /* gc_marks_finish() can push more work (root re-scan) */
            if (gc_marks_finish(objspace)) break;
        }
    }
    else {
        gc_mark_stacked_objects_all(objspace);
        gc_marks_finish(objspace);
    }

    /* move to sweep */
    gc_sweep(objspace);
}
/*
 * Continue incremental marking when the mutator needs free slots.
 * Provide slots either from pooled pages or by growing the heap, then
 * run one marking step; if no slots can be provided, finish marking
 * entirely via gc_marks_rest().
 */
static void
gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
    GC_ASSERT(dont_gc == FALSE);
#if GC_ENABLE_INCREMENTAL_MARK
    gc_enter(objspace, "marks_continue");

    PUSH_MARK_FUNC_DATA(NULL);
    {
        int slot_count = 0;
        const char *slot_source;

        if (heap->pooled_pages) {
            /* move pooled pages to the free list until roughly one
             * page worth of slots has been collected */
            while (heap->pooled_pages && slot_count < HEAP_PAGE_OBJ_LIMIT) {
                struct heap_page *moved = heap_move_pooled_pages_to_free_pages(heap);
                slot_count += moved->free_slots;
            }
            slot_source = "pooled-pages";
        }
        else if (heap_increment(objspace, heap)) {
            slot_count = heap->free_pages->free_slots;
            slot_source = "incremented-pages";
        }

        if (slot_count > 0) {
            gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slot_count, slot_source);
            gc_marks_step(objspace, (int)objspace->rincgc.step_slots);
        }
        else {
            gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
            gc_marks_rest(objspace);
        }
    }
    POP_MARK_FUNC_DATA();

    gc_exit(objspace, "marks_continue");
#endif
}
2014-09-08 08:11:00 +04:00
/*
 * Entry point of the marking phase.  Starts marking (full or minor per
 * `full_mark'); unless incremental marking takes over, runs it to
 * completion immediately.  Profiling timers bracket the whole phase.
 */
static void
gc_marks(rb_objspace_t *objspace, int full_mark)
{
    gc_prof_mark_timer_start(objspace);

    PUSH_MARK_FUNC_DATA(NULL);
    {
        /* setup marking */
#if USE_RGENGC
        gc_marks_start(objspace, full_mark);
        if (!is_incremental_marking(objspace)) {
            /* non-incremental: mark everything right now */
            gc_marks_rest(objspace);
        }

#if RGENGC_PROFILE > 0
        if (gc_prof_record(objspace)) {
            gc_profile_record *record = gc_prof_record(objspace);
            record->old_objects = objspace->rgengc.old_objects;
        }
#endif
#else /* USE_RGENGC */
        /* without RGENGC every collection is a full mark */
        gc_marks_start(objspace, TRUE);
        gc_marks_rest(objspace);
#endif
    }
    POP_MARK_FUNC_DATA();

    gc_prof_mark_timer_stop(objspace);
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
/* RGENGC */
2013-07-18 03:19:38 +04:00
static void
2014-09-08 08:11:00 +04:00
gc_report_body ( int level , rb_objspace_t * objspace , const char * fmt , . . . )
2013-07-18 03:19:38 +04:00
{
if ( level < = RGENGC_DEBUG ) {
char buf [ 1024 ] ;
FILE * out = stderr ;
va_list args ;
const char * status = " " ;
# if USE_RGENGC
if ( during_gc ) {
2014-09-08 08:11:00 +04:00
status = is_full_marking ( objspace ) ? " + " : " - " ;
}
else {
if ( is_lazy_sweeping ( heap_eden ) ) {
status = " S " ;
}
if ( is_incremental_marking ( objspace ) ) {
status = " M " ;
}
2013-07-18 03:19:38 +04:00
}
# endif
va_start ( args , fmt ) ;
vsnprintf ( buf , 1024 , fmt , args ) ;
va_end ( args ) ;
fprintf ( out , " %s| " , status ) ;
fputs ( buf , out ) ;
}
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
/* bit operations */
/* Query the remember set: non-zero iff `obj' is currently remembered.
 * Thin wrapper over RVALUE_REMEMBERED(); `objspace' is kept for
 * signature symmetry with rgengc_remembersetbits_set(). */
static int
rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
{
    return RVALUE_REMEMBERED(obj);
}
2013-06-25 07:24:07 +04:00
static int
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
rgengc_remembersetbits_set ( rb_objspace_t * objspace , VALUE obj )
{
2014-09-08 08:11:00 +04:00
struct heap_page * page = GET_HEAP_PAGE ( obj ) ;
bits_t * bits = & page - > marking_bits [ 0 ] ;
2017-06-22 08:03:18 +03:00
GC_ASSERT ( ! is_incremental_marking ( objspace ) ) ;
2014-09-08 08:11:00 +04:00
2013-06-25 07:24:07 +04:00
if ( MARKED_IN_BITMAP ( bits , obj ) ) {
return FALSE ;
}
else {
2014-09-08 08:11:00 +04:00
page - > flags . has_remembered_objects = TRUE ;
2013-06-25 07:24:07 +04:00
MARK_IN_BITMAP ( bits , obj ) ;
return TRUE ;
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
/* wb, etc */
2013-06-25 07:24:07 +04:00
/* return FALSE if already remembered */
static int
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
rgengc_remember ( rb_objspace_t * objspace , VALUE obj )
{
2014-09-08 08:11:00 +04:00
gc_report ( 6 , objspace , " rgengc_remember: %s %s \n " , obj_info ( obj ) ,
rgengc_remembersetbits_get ( objspace , obj ) ? " was already remembered " : " is remembered now " ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-09-08 08:11:00 +04:00
check_rvalue_consistency ( obj ) ;
2013-06-19 02:07:09 +04:00
2014-09-10 02:32:09 +04:00
if ( RGENGC_CHECK_MODE ) {
if ( RVALUE_WB_UNPROTECTED ( obj ) ) rb_bug ( " rgengc_remember: %s is not wb protected. " , obj_info ( obj ) ) ;
}
2013-11-04 22:59:33 +04:00
# if RGENGC_PROFILE > 0
2014-09-10 02:32:09 +04:00
if ( ! rgengc_remembered ( objspace , obj ) ) {
if ( RVALUE_WB_UNPROTECTED ( obj ) = = 0 ) {
objspace - > profile . total_remembered_normal_object_count + + ;
2013-05-26 20:43:21 +04:00
# if RGENGC_PROFILE >= 2
2014-09-10 02:32:09 +04:00
objspace - > profile . remembered_normal_object_count_types [ BUILTIN_TYPE ( obj ) ] + + ;
2013-05-26 20:43:21 +04:00
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
}
2014-09-10 02:32:09 +04:00
# endif /* RGENGC_PROFILE > 0 */
2014-09-08 08:11:00 +04:00
2013-06-25 07:24:07 +04:00
return rgengc_remembersetbits_set ( objspace , obj ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
/* Non-zero iff `obj' is in the remember set.  Also runs the debug
 * consistency check on the object as a side effect. */
static int
rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
{
    int remembered = rgengc_remembersetbits_get(objspace, obj);

    check_rvalue_consistency(obj);
    gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));

    return remembered;
}
2014-09-08 08:11:00 +04:00
# ifndef PROFILE_REMEMBERSET_MARK
# define PROFILE_REMEMBERSET_MARK 0
# endif
2013-06-25 07:24:07 +04:00
static void
2013-10-22 14:28:31 +04:00
rgengc_rememberset_mark ( rb_objspace_t * objspace , rb_heap_t * heap )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
2013-10-22 14:28:31 +04:00
size_t j ;
2018-05-16 23:39:30 +03:00
struct heap_page * page = 0 ;
2014-09-08 08:11:00 +04:00
# if PROFILE_REMEMBERSET_MARK
int has_old = 0 , has_shady = 0 , has_both = 0 , skip = 0 ;
2013-06-25 07:24:07 +04:00
# endif
2014-09-08 08:11:00 +04:00
gc_report ( 1 , objspace , " rgengc_rememberset_mark: start \n " ) ;
2013-06-25 07:24:07 +04:00
2018-05-16 23:39:30 +03:00
list_for_each ( & heap - > pages , page , page_node ) {
2015-03-18 21:02:13 +03:00
if ( page - > flags . has_remembered_objects | page - > flags . has_uncollectible_shady_objects ) {
2014-09-08 08:11:00 +04:00
RVALUE * p = page - > start ;
RVALUE * offset = p - NUM_IN_PAGE ( p ) ;
2016-01-09 01:15:40 +03:00
bits_t bitset , bits [ HEAP_PAGE_BITMAP_LIMIT ] ;
2014-09-08 08:11:00 +04:00
bits_t * marking_bits = page - > marking_bits ;
2015-03-18 21:02:13 +03:00
bits_t * uncollectible_bits = page - > uncollectible_bits ;
2014-09-08 08:11:00 +04:00
bits_t * wb_unprotected_bits = page - > wb_unprotected_bits ;
# if PROFILE_REMEMBERSET_MARK
2015-03-18 21:02:13 +03:00
if ( page - > flags . has_remembered_objects & & page - > flags . has_uncollectible_shady_objects ) has_both + + ;
2014-09-08 08:11:00 +04:00
else if ( page - > flags . has_remembered_objects ) has_old + + ;
2015-03-18 21:02:13 +03:00
else if ( page - > flags . has_uncollectible_shady_objects ) has_shady + + ;
2014-09-08 08:11:00 +04:00
# endif
2016-01-09 01:15:40 +03:00
for ( j = 0 ; j < HEAP_PAGE_BITMAP_LIMIT ; j + + ) {
2015-03-18 21:02:13 +03:00
bits [ j ] = marking_bits [ j ] | ( uncollectible_bits [ j ] & wb_unprotected_bits [ j ] ) ;
2014-09-08 08:11:00 +04:00
marking_bits [ j ] = 0 ;
}
page - > flags . has_remembered_objects = FALSE ;
2013-06-05 19:57:12 +04:00
2016-01-09 01:15:40 +03:00
for ( j = 0 ; j < HEAP_PAGE_BITMAP_LIMIT ; j + + ) {
2013-06-05 19:57:12 +04:00
bitset = bits [ j ] ;
2014-09-08 08:11:00 +04:00
if ( bitset ) {
p = offset + j * BITS_BITLENGTH ;
do {
if ( bitset & 1 ) {
VALUE obj = ( VALUE ) p ;
gc_report ( 2 , objspace , " rgengc_rememberset_mark: mark %s \n " , obj_info ( obj ) ) ;
2017-06-22 08:03:18 +03:00
GC_ASSERT ( RVALUE_UNCOLLECTIBLE ( obj ) ) ;
GC_ASSERT ( RVALUE_OLD_P ( obj ) | | RVALUE_WB_UNPROTECTED ( obj ) ) ;
2013-11-04 22:59:33 +04:00
2014-09-08 08:11:00 +04:00
gc_mark_children ( objspace , obj ) ;
}
p + + ;
bitset > > = 1 ;
} while ( bitset ) ;
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
}
2014-09-08 08:11:00 +04:00
# if PROFILE_REMEMBERSET_MARK
else {
skip + + ;
}
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
2014-09-08 08:11:00 +04:00
# if PROFILE_REMEMBERSET_MARK
fprintf ( stderr , " %d \t %d \t %d \t %d \n " , has_both , has_old , has_shady , skip ) ;
2013-06-20 16:20:27 +04:00
# endif
2014-09-08 08:11:00 +04:00
gc_report ( 1 , objspace , " rgengc_rememberset_mark: finished \n " ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
static void
2013-10-22 14:28:31 +04:00
rgengc_mark_and_rememberset_clear ( rb_objspace_t * objspace , rb_heap_t * heap )
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
{
2018-05-16 23:39:30 +03:00
struct heap_page * page = 0 ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2018-05-16 23:39:30 +03:00
list_for_each ( & heap - > pages , page , page_node ) {
2016-01-09 01:15:40 +03:00
memset ( & page - > mark_bits [ 0 ] , 0 , HEAP_PAGE_BITMAP_SIZE ) ;
memset ( & page - > marking_bits [ 0 ] , 0 , HEAP_PAGE_BITMAP_SIZE ) ;
memset ( & page - > uncollectible_bits [ 0 ] , 0 , HEAP_PAGE_BITMAP_SIZE ) ;
2015-03-18 21:02:13 +03:00
page - > flags . has_uncollectible_shady_objects = FALSE ;
2014-09-08 08:11:00 +04:00
page - > flags . has_remembered_objects = FALSE ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
}
/* RGENGC: APIs */
2015-11-19 14:13:27 +03:00
NOINLINE ( static void gc_writebarrier_generational ( VALUE a , VALUE b , rb_objspace_t * objspace ) ) ;
2014-09-08 08:11:00 +04:00
2015-03-17 13:26:39 +03:00
static void
2015-11-19 14:13:27 +03:00
gc_writebarrier_generational ( VALUE a , VALUE b , rb_objspace_t * objspace )
2015-03-17 13:26:39 +03:00
{
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
if ( RGENGC_CHECK_MODE ) {
2015-03-18 12:02:10 +03:00
if ( ! RVALUE_OLD_P ( a ) ) rb_bug ( " gc_writebarrier_generational: %s is not an old object. " , obj_info ( a ) ) ;
if ( RVALUE_OLD_P ( b ) ) rb_bug ( " gc_writebarrier_generational: %s is an old object. " , obj_info ( b ) ) ;
2015-03-24 13:12:24 +03:00
if ( is_incremental_marking ( objspace ) ) rb_bug ( " gc_writebarrier_generational: called while incremental marking: %s -> %s " , obj_info ( a ) , obj_info ( b ) ) ;
2013-11-04 22:59:33 +04:00
}
2015-03-18 12:02:10 +03:00
# if 1
2015-12-14 05:51:13 +03:00
/* mark `a' and remember (default behavior) */
2014-09-08 08:11:00 +04:00
if ( ! rgengc_remembered ( objspace , a ) ) {
rgengc_remember ( objspace , a ) ;
2015-03-24 13:12:24 +03:00
gc_report ( 1 , objspace , " gc_writebarrier_generational: %s (remembered) -> %s \n " , obj_info ( a ) , obj_info ( b ) ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
2015-03-18 12:02:10 +03:00
# else
/* mark `b' and remember */
MARK_IN_BITMAP ( GET_HEAP_MARK_BITS ( b ) , b ) ;
if ( RVALUE_WB_UNPROTECTED ( b ) ) {
gc_remember_unprotected ( objspace , b ) ;
}
else {
RVALUE_AGE_SET_OLD ( objspace , b ) ;
rgengc_remember ( objspace , b ) ;
}
gc_report ( 1 , objspace , " gc_writebarrier_generational: %s -> %s (remembered) \n " , obj_info ( a ) , obj_info ( b ) ) ;
# endif
check_rvalue_consistency ( a ) ;
check_rvalue_consistency ( b ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
}
2014-09-08 08:11:00 +04:00
# if GC_ENABLE_INCREMENTAL_MARK
/*
 * Marks `obj` as reached from `parent` during incremental marking:
 * records the parent for relation checking, verifies the generational
 * relation, and — if the object was not already marked — ages it and
 * pushes it onto the grey set.  Returns early when the mark bit was
 * already set.
 */
static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;

    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}
2015-11-19 14:13:27 +03:00
NOINLINE ( static void gc_writebarrier_incremental ( VALUE a , VALUE b , rb_objspace_t * objspace ) ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2015-03-17 13:26:39 +03:00
static void
2015-11-19 14:13:27 +03:00
gc_writebarrier_incremental ( VALUE a , VALUE b , rb_objspace_t * objspace )
2015-03-17 13:26:39 +03:00
{
fix SEGV inspecting uninitialized objects
obj_info() assumes the given object is alive. OTOH
gc_writebarrier_incremental is called before or in middle of
object initialization. Can cause SEGV.
(lldb) run
Process 48188 launched: './miniruby' (x86_64)
Process 48188 stopped
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
2069 static inline const VALUE *
2070 rb_array_const_ptr(VALUE a)
2071 {
-> 2072 return FIX_CONST_VALUE_PTR((RBASIC(a)->flags & RARRAY_EMBED_FLAG) ?
2073 RARRAY(a)->as.ary : RARRAY(a)->as.heap.ptr);
2074 }
2075
(lldb) bt
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
* frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
frame #1: 0x00000001000bfaab miniruby`pathobj_path(pathobj=5251291222225483145) + 70 at vm_core.h:269
frame #2: 0x00000001000c25ff miniruby`rb_iseq_path(iseq=0x00000001025b71a8) + 32 at iseq.c:723
frame #3: 0x000000010009db09 miniruby`rb_raw_iseq_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, iseq=0x00000001025b71a8) + 69 at gc.c:9274
frame #4: 0x000000010009e1d5 miniruby`rb_raw_obj_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, obj=4334514520) + 1546 at gc.c:9351
frame #5: 0x000000010009e4d5 miniruby`obj_info(obj=4334514520) + 98 at gc.c:9429
frame #6: 0x0000000100096658 miniruby`gc_writebarrier_incremental(a=4334514520, b=4334514600, objspace=0x00000001007d3280) + 61 at gc.c:5963
frame #7: 0x00000001000968ca miniruby`rb_gc_writebarrier(a=4334514520, b=4334514600) + 127 at gc.c:6009
frame #8: 0x00000001001eabe0 miniruby`rb_obj_written(a=4334514520, oldv=52, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 72 at ruby.h:1472
frame #9: 0x00000001001eac2c miniruby`rb_obj_write(a=4334514520, slot=0x000000010259ff10, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 70 at ruby.h:1489
frame #10: 0x0000000100208b6f miniruby`vm_proc_create_from_captured(klass=4311027960, captured=0x0000000102500338, block_type=block_type_ifunc, is_from_method='\0', is_lambda='\x01') + 137 at vm.c:821
frame #11: 0x0000000100208e5c miniruby`rb_vm_make_proc_lambda(ec=0x00000001007d3548, captured=0x0000000102500338, klass=4311027960, is_lambda='\x01') + 134 at vm.c:892
frame #12: 0x000000010011f08e miniruby`proc_new(klass=4311027960, is_lambda='\x01') + 445 at proc.c:752
frame #13: 0x000000010011f110 miniruby`rb_block_lambda + 27 at proc.c:808
frame #14: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`rb_block_lambda at proc.c:807), recv=4310991600, argc=0, argv=0x0000000000000000) + 41 at vm_insnhelper.c:1729
frame #15: 0x00000001002033de miniruby`vm_call0_cfunc_with_frame(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 370 at vm_eval.c:85
frame #16: 0x00000001002034d9 miniruby`vm_call0_cfunc(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 59 at vm_eval.c:100
frame #17: 0x000000010020368f miniruby`vm_call0_body(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 436 at vm_eval.c:131
frame #18: 0x000000010020326a miniruby`vm_call0(ec=0x00000001007d3548, recv=4310991600, id=2993, argc=0, argv=0x0000000000000000, me=0x0000000100f48110) + 142 at vm_eval.c:58
frame #19: 0x0000000100203c60 miniruby`rb_call0(ec=0x00000001007d3548, recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL, self=4334514640) + 166 at vm_eval.c:296
frame #20: 0x0000000100204827 miniruby`rb_call(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL) + 84 at vm_eval.c:589
frame #21: 0x000000010020518b miniruby`rb_funcallv(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000) + 52 at vm_eval.c:815
frame #22: 0x000000010012242e miniruby`mlambda(method=0) + 45 at proc.c:2661
frame #23: 0x0000000100205bac miniruby`rb_iterate0(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, ifunc=0x00000001025b71a8, ec=0x00000001007d3548) + 380 at vm_eval.c:1134
frame #24: 0x0000000100205d16 miniruby`rb_iterate(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, bl_proc=(miniruby`bmcall at proc.c:2666), data2=4334514640) + 88 at vm_eval.c:1166
frame #25: 0x00000001001224c7 miniruby`method_to_proc(method=4334514640) + 43 at proc.c:2701
frame #26: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`method_to_proc at proc.c:2688), recv=4334514640, argc=0, argv=0x0000000102400568) + 41 at vm_insnhelper.c:1729
frame #27: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 386 at vm_insnhelper.c:1918
frame #28: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 149 at vm_insnhelper.c:1934
frame #29: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 239 at vm_insnhelper.c:2232
frame #30: 0x00000001001f49a4 miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 117 at vm_insnhelper.c:2355
frame #31: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 59 at vm_insnhelper.c:2398
frame #32: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #33: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #34: 0x000000010020c3d1 miniruby`rb_iseq_eval(iseq=0x00000001007f8270) + 52 at vm.c:2008
frame #35: 0x00000001000caa4a miniruby`rb_load_internal0(ec=0x00000001007d3548, fname=4310799960, wrap=0) + 631 at load.c:611
frame #36: 0x00000001000cab36 miniruby`rb_load_internal(fname=4310799960, wrap=0) + 46 at load.c:642
frame #37: 0x00000001000cae1d miniruby`rb_f_load(argc=1, argv=0x00000001024004b8) + 217 at load.c:710
frame #38: 0x00000001001f247c miniruby`call_cfunc_m1(func=(miniruby`rb_f_load at load.c:695), recv=4311327440, argc=1, argv=0x00000001024004b8) + 47 at vm_insnhelper.c:1723
frame #39: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 386 at vm_insnhelper.c:1918
frame #40: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 149 at vm_insnhelper.c:1934
frame #41: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 239 at vm_insnhelper.c:2232
frame #42: 0x00000001001f4a2c miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 253 at vm_insnhelper.c:2366
frame #43: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 59 at vm_insnhelper.c:2398
frame #44: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #45: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #46: 0x000000010020c40f miniruby`rb_iseq_eval_main(iseq=0x0000000100f21240) + 52 at vm.c:2019
frame #47: 0x000000010007c774 miniruby`ruby_exec_internal(n=0x0000000100f21240) + 297 at eval.c:246
frame #48: 0x000000010007c89a miniruby`ruby_exec_node(n=0x0000000100f21240) + 36 at eval.c:310
frame #49: 0x000000010007c86d miniruby`ruby_run_node(n=0x0000000100f21240) + 62 at eval.c:302
frame #50: 0x0000000100001399 miniruby`main(argc=9, argv=0x00007fff5fbfdae0) + 113 at main.c:42
frame #51: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61569 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:05 +03:00
gc_report ( 2 , objspace , " gc_writebarrier_incremental: [LG] %p -> %s \n " , ( void * ) a , obj_info ( b ) ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2015-03-17 13:26:39 +03:00
if ( RVALUE_BLACK_P ( a ) ) {
if ( RVALUE_WHITE_P ( b ) ) {
if ( ! RVALUE_WB_UNPROTECTED ( a ) ) {
fix SEGV inspecting uninitialized objects
obj_info() assumes the given object is alive. OTOH
gc_writebarrier_incremental is called before or in middle of
object initialization. Can cause SEGV.
(lldb) run
Process 48188 launched: './miniruby' (x86_64)
Process 48188 stopped
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
2069 static inline const VALUE *
2070 rb_array_const_ptr(VALUE a)
2071 {
-> 2072 return FIX_CONST_VALUE_PTR((RBASIC(a)->flags & RARRAY_EMBED_FLAG) ?
2073 RARRAY(a)->as.ary : RARRAY(a)->as.heap.ptr);
2074 }
2075
(lldb) bt
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
* frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
frame #1: 0x00000001000bfaab miniruby`pathobj_path(pathobj=5251291222225483145) + 70 at vm_core.h:269
frame #2: 0x00000001000c25ff miniruby`rb_iseq_path(iseq=0x00000001025b71a8) + 32 at iseq.c:723
frame #3: 0x000000010009db09 miniruby`rb_raw_iseq_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, iseq=0x00000001025b71a8) + 69 at gc.c:9274
frame #4: 0x000000010009e1d5 miniruby`rb_raw_obj_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, obj=4334514520) + 1546 at gc.c:9351
frame #5: 0x000000010009e4d5 miniruby`obj_info(obj=4334514520) + 98 at gc.c:9429
frame #6: 0x0000000100096658 miniruby`gc_writebarrier_incremental(a=4334514520, b=4334514600, objspace=0x00000001007d3280) + 61 at gc.c:5963
frame #7: 0x00000001000968ca miniruby`rb_gc_writebarrier(a=4334514520, b=4334514600) + 127 at gc.c:6009
frame #8: 0x00000001001eabe0 miniruby`rb_obj_written(a=4334514520, oldv=52, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 72 at ruby.h:1472
frame #9: 0x00000001001eac2c miniruby`rb_obj_write(a=4334514520, slot=0x000000010259ff10, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 70 at ruby.h:1489
frame #10: 0x0000000100208b6f miniruby`vm_proc_create_from_captured(klass=4311027960, captured=0x0000000102500338, block_type=block_type_ifunc, is_from_method='\0', is_lambda='\x01') + 137 at vm.c:821
frame #11: 0x0000000100208e5c miniruby`rb_vm_make_proc_lambda(ec=0x00000001007d3548, captured=0x0000000102500338, klass=4311027960, is_lambda='\x01') + 134 at vm.c:892
frame #12: 0x000000010011f08e miniruby`proc_new(klass=4311027960, is_lambda='\x01') + 445 at proc.c:752
frame #13: 0x000000010011f110 miniruby`rb_block_lambda + 27 at proc.c:808
frame #14: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`rb_block_lambda at proc.c:807), recv=4310991600, argc=0, argv=0x0000000000000000) + 41 at vm_insnhelper.c:1729
frame #15: 0x00000001002033de miniruby`vm_call0_cfunc_with_frame(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 370 at vm_eval.c:85
frame #16: 0x00000001002034d9 miniruby`vm_call0_cfunc(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 59 at vm_eval.c:100
frame #17: 0x000000010020368f miniruby`vm_call0_body(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 436 at vm_eval.c:131
frame #18: 0x000000010020326a miniruby`vm_call0(ec=0x00000001007d3548, recv=4310991600, id=2993, argc=0, argv=0x0000000000000000, me=0x0000000100f48110) + 142 at vm_eval.c:58
frame #19: 0x0000000100203c60 miniruby`rb_call0(ec=0x00000001007d3548, recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL, self=4334514640) + 166 at vm_eval.c:296
frame #20: 0x0000000100204827 miniruby`rb_call(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL) + 84 at vm_eval.c:589
frame #21: 0x000000010020518b miniruby`rb_funcallv(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000) + 52 at vm_eval.c:815
frame #22: 0x000000010012242e miniruby`mlambda(method=0) + 45 at proc.c:2661
frame #23: 0x0000000100205bac miniruby`rb_iterate0(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, ifunc=0x00000001025b71a8, ec=0x00000001007d3548) + 380 at vm_eval.c:1134
frame #24: 0x0000000100205d16 miniruby`rb_iterate(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, bl_proc=(miniruby`bmcall at proc.c:2666), data2=4334514640) + 88 at vm_eval.c:1166
frame #25: 0x00000001001224c7 miniruby`method_to_proc(method=4334514640) + 43 at proc.c:2701
frame #26: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`method_to_proc at proc.c:2688), recv=4334514640, argc=0, argv=0x0000000102400568) + 41 at vm_insnhelper.c:1729
frame #27: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 386 at vm_insnhelper.c:1918
frame #28: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 149 at vm_insnhelper.c:1934
frame #29: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 239 at vm_insnhelper.c:2232
frame #30: 0x00000001001f49a4 miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 117 at vm_insnhelper.c:2355
frame #31: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 59 at vm_insnhelper.c:2398
frame #32: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #33: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #34: 0x000000010020c3d1 miniruby`rb_iseq_eval(iseq=0x00000001007f8270) + 52 at vm.c:2008
frame #35: 0x00000001000caa4a miniruby`rb_load_internal0(ec=0x00000001007d3548, fname=4310799960, wrap=0) + 631 at load.c:611
frame #36: 0x00000001000cab36 miniruby`rb_load_internal(fname=4310799960, wrap=0) + 46 at load.c:642
frame #37: 0x00000001000cae1d miniruby`rb_f_load(argc=1, argv=0x00000001024004b8) + 217 at load.c:710
frame #38: 0x00000001001f247c miniruby`call_cfunc_m1(func=(miniruby`rb_f_load at load.c:695), recv=4311327440, argc=1, argv=0x00000001024004b8) + 47 at vm_insnhelper.c:1723
frame #39: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 386 at vm_insnhelper.c:1918
frame #40: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 149 at vm_insnhelper.c:1934
frame #41: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 239 at vm_insnhelper.c:2232
frame #42: 0x00000001001f4a2c miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 253 at vm_insnhelper.c:2366
frame #43: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 59 at vm_insnhelper.c:2398
frame #44: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #45: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #46: 0x000000010020c40f miniruby`rb_iseq_eval_main(iseq=0x0000000100f21240) + 52 at vm.c:2019
frame #47: 0x000000010007c774 miniruby`ruby_exec_internal(n=0x0000000100f21240) + 297 at eval.c:246
frame #48: 0x000000010007c89a miniruby`ruby_exec_node(n=0x0000000100f21240) + 36 at eval.c:310
frame #49: 0x000000010007c86d miniruby`ruby_run_node(n=0x0000000100f21240) + 62 at eval.c:302
frame #50: 0x0000000100001399 miniruby`main(argc=9, argv=0x00007fff5fbfdae0) + 113 at main.c:42
frame #51: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61569 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:05 +03:00
gc_report ( 2 , objspace , " gc_writebarrier_incremental: [IN] %p -> %s \n " , ( void * ) a , obj_info ( b ) ) ;
2015-03-17 13:26:39 +03:00
gc_mark_from ( objspace , b , a ) ;
2014-09-08 08:11:00 +04:00
}
2015-03-17 13:26:39 +03:00
}
else if ( RVALUE_OLD_P ( a ) & & ! RVALUE_OLD_P ( b ) ) {
if ( ! RVALUE_WB_UNPROTECTED ( b ) ) {
fix SEGV inspecting uninitialized objects
obj_info() assumes the given object is alive. OTOH
gc_writebarrier_incremental is called before or in middle of
object initialization. Can cause SEGV.
(lldb) run
Process 48188 launched: './miniruby' (x86_64)
Process 48188 stopped
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
2069 static inline const VALUE *
2070 rb_array_const_ptr(VALUE a)
2071 {
-> 2072 return FIX_CONST_VALUE_PTR((RBASIC(a)->flags & RARRAY_EMBED_FLAG) ?
2073 RARRAY(a)->as.ary : RARRAY(a)->as.heap.ptr);
2074 }
2075
(lldb) bt
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
* frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
frame #1: 0x00000001000bfaab miniruby`pathobj_path(pathobj=5251291222225483145) + 70 at vm_core.h:269
frame #2: 0x00000001000c25ff miniruby`rb_iseq_path(iseq=0x00000001025b71a8) + 32 at iseq.c:723
frame #3: 0x000000010009db09 miniruby`rb_raw_iseq_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, iseq=0x00000001025b71a8) + 69 at gc.c:9274
frame #4: 0x000000010009e1d5 miniruby`rb_raw_obj_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, obj=4334514520) + 1546 at gc.c:9351
frame #5: 0x000000010009e4d5 miniruby`obj_info(obj=4334514520) + 98 at gc.c:9429
frame #6: 0x0000000100096658 miniruby`gc_writebarrier_incremental(a=4334514520, b=4334514600, objspace=0x00000001007d3280) + 61 at gc.c:5963
frame #7: 0x00000001000968ca miniruby`rb_gc_writebarrier(a=4334514520, b=4334514600) + 127 at gc.c:6009
frame #8: 0x00000001001eabe0 miniruby`rb_obj_written(a=4334514520, oldv=52, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 72 at ruby.h:1472
frame #9: 0x00000001001eac2c miniruby`rb_obj_write(a=4334514520, slot=0x000000010259ff10, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 70 at ruby.h:1489
frame #10: 0x0000000100208b6f miniruby`vm_proc_create_from_captured(klass=4311027960, captured=0x0000000102500338, block_type=block_type_ifunc, is_from_method='\0', is_lambda='\x01') + 137 at vm.c:821
frame #11: 0x0000000100208e5c miniruby`rb_vm_make_proc_lambda(ec=0x00000001007d3548, captured=0x0000000102500338, klass=4311027960, is_lambda='\x01') + 134 at vm.c:892
frame #12: 0x000000010011f08e miniruby`proc_new(klass=4311027960, is_lambda='\x01') + 445 at proc.c:752
frame #13: 0x000000010011f110 miniruby`rb_block_lambda + 27 at proc.c:808
frame #14: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`rb_block_lambda at proc.c:807), recv=4310991600, argc=0, argv=0x0000000000000000) + 41 at vm_insnhelper.c:1729
frame #15: 0x00000001002033de miniruby`vm_call0_cfunc_with_frame(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 370 at vm_eval.c:85
frame #16: 0x00000001002034d9 miniruby`vm_call0_cfunc(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 59 at vm_eval.c:100
frame #17: 0x000000010020368f miniruby`vm_call0_body(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 436 at vm_eval.c:131
frame #18: 0x000000010020326a miniruby`vm_call0(ec=0x00000001007d3548, recv=4310991600, id=2993, argc=0, argv=0x0000000000000000, me=0x0000000100f48110) + 142 at vm_eval.c:58
frame #19: 0x0000000100203c60 miniruby`rb_call0(ec=0x00000001007d3548, recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL, self=4334514640) + 166 at vm_eval.c:296
frame #20: 0x0000000100204827 miniruby`rb_call(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL) + 84 at vm_eval.c:589
frame #21: 0x000000010020518b miniruby`rb_funcallv(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000) + 52 at vm_eval.c:815
frame #22: 0x000000010012242e miniruby`mlambda(method=0) + 45 at proc.c:2661
frame #23: 0x0000000100205bac miniruby`rb_iterate0(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, ifunc=0x00000001025b71a8, ec=0x00000001007d3548) + 380 at vm_eval.c:1134
frame #24: 0x0000000100205d16 miniruby`rb_iterate(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, bl_proc=(miniruby`bmcall at proc.c:2666), data2=4334514640) + 88 at vm_eval.c:1166
frame #25: 0x00000001001224c7 miniruby`method_to_proc(method=4334514640) + 43 at proc.c:2701
frame #26: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`method_to_proc at proc.c:2688), recv=4334514640, argc=0, argv=0x0000000102400568) + 41 at vm_insnhelper.c:1729
frame #27: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 386 at vm_insnhelper.c:1918
frame #28: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 149 at vm_insnhelper.c:1934
frame #29: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 239 at vm_insnhelper.c:2232
frame #30: 0x00000001001f49a4 miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 117 at vm_insnhelper.c:2355
frame #31: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 59 at vm_insnhelper.c:2398
frame #32: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #33: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #34: 0x000000010020c3d1 miniruby`rb_iseq_eval(iseq=0x00000001007f8270) + 52 at vm.c:2008
frame #35: 0x00000001000caa4a miniruby`rb_load_internal0(ec=0x00000001007d3548, fname=4310799960, wrap=0) + 631 at load.c:611
frame #36: 0x00000001000cab36 miniruby`rb_load_internal(fname=4310799960, wrap=0) + 46 at load.c:642
frame #37: 0x00000001000cae1d miniruby`rb_f_load(argc=1, argv=0x00000001024004b8) + 217 at load.c:710
frame #38: 0x00000001001f247c miniruby`call_cfunc_m1(func=(miniruby`rb_f_load at load.c:695), recv=4311327440, argc=1, argv=0x00000001024004b8) + 47 at vm_insnhelper.c:1723
frame #39: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 386 at vm_insnhelper.c:1918
frame #40: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 149 at vm_insnhelper.c:1934
frame #41: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 239 at vm_insnhelper.c:2232
frame #42: 0x00000001001f4a2c miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 253 at vm_insnhelper.c:2366
frame #43: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 59 at vm_insnhelper.c:2398
frame #44: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #45: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #46: 0x000000010020c40f miniruby`rb_iseq_eval_main(iseq=0x0000000100f21240) + 52 at vm.c:2019
frame #47: 0x000000010007c774 miniruby`ruby_exec_internal(n=0x0000000100f21240) + 297 at eval.c:246
frame #48: 0x000000010007c89a miniruby`ruby_exec_node(n=0x0000000100f21240) + 36 at eval.c:310
frame #49: 0x000000010007c86d miniruby`ruby_run_node(n=0x0000000100f21240) + 62 at eval.c:302
frame #50: 0x0000000100001399 miniruby`main(argc=9, argv=0x00007fff5fbfdae0) + 113 at main.c:42
frame #51: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61569 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:05 +03:00
gc_report ( 1 , objspace , " gc_writebarrier_incremental: [GN] %p -> %s \n " , ( void * ) a , obj_info ( b ) ) ;
2015-03-17 13:26:39 +03:00
RVALUE_AGE_SET_OLD ( objspace , b ) ;
2013-06-25 07:24:07 +04:00
2015-03-17 13:26:39 +03:00
if ( RVALUE_BLACK_P ( b ) ) {
gc_grey ( objspace , b ) ;
2014-09-08 08:11:00 +04:00
}
}
2015-03-17 13:26:39 +03:00
else {
fix SEGV inspecting uninitialized objects
obj_info() assumes the given object is alive. OTOH
gc_writebarrier_incremental is called before or in middle of
object initialization. Can cause SEGV.
(lldb) run
Process 48188 launched: './miniruby' (x86_64)
Process 48188 stopped
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
2069 static inline const VALUE *
2070 rb_array_const_ptr(VALUE a)
2071 {
-> 2072 return FIX_CONST_VALUE_PTR((RBASIC(a)->flags & RARRAY_EMBED_FLAG) ?
2073 RARRAY(a)->as.ary : RARRAY(a)->as.heap.ptr);
2074 }
2075
(lldb) bt
* thread #1: tid = 0x30fd53, 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
* frame #0: 0x00000001000bf7a9 miniruby`rb_array_const_ptr(a=5251291222225483145) + 12 at ruby.h:2072
frame #1: 0x00000001000bfaab miniruby`pathobj_path(pathobj=5251291222225483145) + 70 at vm_core.h:269
frame #2: 0x00000001000c25ff miniruby`rb_iseq_path(iseq=0x00000001025b71a8) + 32 at iseq.c:723
frame #3: 0x000000010009db09 miniruby`rb_raw_iseq_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, iseq=0x00000001025b71a8) + 69 at gc.c:9274
frame #4: 0x000000010009e1d5 miniruby`rb_raw_obj_info(buff="0x00000001025b7158 [0 ] proc (Proc)", buff_size=256, obj=4334514520) + 1546 at gc.c:9351
frame #5: 0x000000010009e4d5 miniruby`obj_info(obj=4334514520) + 98 at gc.c:9429
frame #6: 0x0000000100096658 miniruby`gc_writebarrier_incremental(a=4334514520, b=4334514600, objspace=0x00000001007d3280) + 61 at gc.c:5963
frame #7: 0x00000001000968ca miniruby`rb_gc_writebarrier(a=4334514520, b=4334514600) + 127 at gc.c:6009
frame #8: 0x00000001001eabe0 miniruby`rb_obj_written(a=4334514520, oldv=52, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 72 at ruby.h:1472
frame #9: 0x00000001001eac2c miniruby`rb_obj_write(a=4334514520, slot=0x000000010259ff10, b=4334514600, filename="/Users/urabe.shyouhei/data/src/pedantic/vm.c", line=821) + 70 at ruby.h:1489
frame #10: 0x0000000100208b6f miniruby`vm_proc_create_from_captured(klass=4311027960, captured=0x0000000102500338, block_type=block_type_ifunc, is_from_method='\0', is_lambda='\x01') + 137 at vm.c:821
frame #11: 0x0000000100208e5c miniruby`rb_vm_make_proc_lambda(ec=0x00000001007d3548, captured=0x0000000102500338, klass=4311027960, is_lambda='\x01') + 134 at vm.c:892
frame #12: 0x000000010011f08e miniruby`proc_new(klass=4311027960, is_lambda='\x01') + 445 at proc.c:752
frame #13: 0x000000010011f110 miniruby`rb_block_lambda + 27 at proc.c:808
frame #14: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`rb_block_lambda at proc.c:807), recv=4310991600, argc=0, argv=0x0000000000000000) + 41 at vm_insnhelper.c:1729
frame #15: 0x00000001002033de miniruby`vm_call0_cfunc_with_frame(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 370 at vm_eval.c:85
frame #16: 0x00000001002034d9 miniruby`vm_call0_cfunc(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 59 at vm_eval.c:100
frame #17: 0x000000010020368f miniruby`vm_call0_body(ec=0x00000001007d3548, calling=0x00007fff5fbfb080, ci=0x00007fff5fbfb070, cc=0x00007fff5fbfb0a0, argv=0x0000000000000000) + 436 at vm_eval.c:131
frame #18: 0x000000010020326a miniruby`vm_call0(ec=0x00000001007d3548, recv=4310991600, id=2993, argc=0, argv=0x0000000000000000, me=0x0000000100f48110) + 142 at vm_eval.c:58
frame #19: 0x0000000100203c60 miniruby`rb_call0(ec=0x00000001007d3548, recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL, self=4334514640) + 166 at vm_eval.c:296
frame #20: 0x0000000100204827 miniruby`rb_call(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000, scope=CALL_FCALL) + 84 at vm_eval.c:589
frame #21: 0x000000010020518b miniruby`rb_funcallv(recv=4310991600, mid=2993, argc=0, argv=0x0000000000000000) + 52 at vm_eval.c:815
frame #22: 0x000000010012242e miniruby`mlambda(method=0) + 45 at proc.c:2661
frame #23: 0x0000000100205bac miniruby`rb_iterate0(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, ifunc=0x00000001025b71a8, ec=0x00000001007d3548) + 380 at vm_eval.c:1134
frame #24: 0x0000000100205d16 miniruby`rb_iterate(it_proc=(miniruby`mlambda at proc.c:2660), data1=0, bl_proc=(miniruby`bmcall at proc.c:2666), data2=4334514640) + 88 at vm_eval.c:1166
frame #25: 0x00000001001224c7 miniruby`method_to_proc(method=4334514640) + 43 at proc.c:2701
frame #26: 0x00000001001f24a7 miniruby`call_cfunc_0(func=(miniruby`method_to_proc at proc.c:2688), recv=4334514640, argc=0, argv=0x0000000102400568) + 41 at vm_insnhelper.c:1729
frame #27: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 386 at vm_insnhelper.c:1918
frame #28: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 149 at vm_insnhelper.c:1934
frame #29: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 239 at vm_insnhelper.c:2232
frame #30: 0x00000001001f49a4 miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 117 at vm_insnhelper.c:2355
frame #31: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x0000000102500350, calling=0x00007fff5fbfc030, ci=0x0000000100f2ec70, cc=0x0000000102735718) + 59 at vm_insnhelper.c:2398
frame #32: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #33: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #34: 0x000000010020c3d1 miniruby`rb_iseq_eval(iseq=0x00000001007f8270) + 52 at vm.c:2008
frame #35: 0x00000001000caa4a miniruby`rb_load_internal0(ec=0x00000001007d3548, fname=4310799960, wrap=0) + 631 at load.c:611
frame #36: 0x00000001000cab36 miniruby`rb_load_internal(fname=4310799960, wrap=0) + 46 at load.c:642
frame #37: 0x00000001000cae1d miniruby`rb_f_load(argc=1, argv=0x00000001024004b8) + 217 at load.c:710
frame #38: 0x00000001001f247c miniruby`call_cfunc_m1(func=(miniruby`rb_f_load at load.c:695), recv=4311327440, argc=1, argv=0x00000001024004b8) + 47 at vm_insnhelper.c:1723
frame #39: 0x00000001001f2f87 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 386 at vm_insnhelper.c:1918
frame #40: 0x00000001001f30d6 miniruby`vm_call_cfunc(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 149 at vm_insnhelper.c:1934
frame #41: 0x00000001001f4319 miniruby`vm_call_method_each_type(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 239 at vm_insnhelper.c:2232
frame #42: 0x00000001001f4a2c miniruby`vm_call_method(ec=0x00000001007d3548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 253 at vm_insnhelper.c:2366
frame #43: 0x00000001001f4b7a miniruby`vm_call_general(ec=0x00000001007d3548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd3e0, ci=0x0000000102541070, cc=0x0000000100f9e918) + 59 at vm_insnhelper.c:2398
frame #44: 0x00000001001faf0e miniruby`vm_exec_core(ec=0x00000001007d3548, initial=0) + 8471 at insns.def:915
frame #45: 0x000000010020b75d miniruby`vm_exec(ec=0x00000001007d3548) + 230 at vm.c:1771
frame #46: 0x000000010020c40f miniruby`rb_iseq_eval_main(iseq=0x0000000100f21240) + 52 at vm.c:2019
frame #47: 0x000000010007c774 miniruby`ruby_exec_internal(n=0x0000000100f21240) + 297 at eval.c:246
frame #48: 0x000000010007c89a miniruby`ruby_exec_node(n=0x0000000100f21240) + 36 at eval.c:310
frame #49: 0x000000010007c86d miniruby`ruby_run_node(n=0x0000000100f21240) + 62 at eval.c:302
frame #50: 0x0000000100001399 miniruby`main(argc=9, argv=0x00007fff5fbfdae0) + 113 at main.c:42
frame #51: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61569 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:05 +03:00
gc_report ( 1 , objspace , " gc_writebarrier_incremental: [LL] %p -> %s \n " , ( void * ) a , obj_info ( b ) ) ;
2015-03-17 13:26:39 +03:00
gc_remember_unprotected ( objspace , b ) ;
}
2014-09-08 08:11:00 +04:00
}
}
}
2015-03-17 13:26:39 +03:00
# else
2015-11-19 14:13:27 +03:00
# define gc_writebarrier_incremental(a, b, objspace)
2014-09-08 08:11:00 +04:00
# endif
2015-03-17 13:26:39 +03:00
/*
 * Generic write barrier entry point: record that object `a` now references
 * object `b`.  Dispatches to the incremental-marking barrier while an
 * incremental mark is in progress, otherwise to the generational barrier
 * (only needed when an old object gains a reference to a young one).
 */
void
rb_gc_writebarrier(VALUE a, VALUE b)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
    if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");

    if (is_incremental_marking(objspace)) {
        /* slow path */
        gc_writebarrier_incremental(a, b, objspace);
    }
    else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
        /* old->young reference: remember `a` in the remember set */
        gc_writebarrier_generational(a, b, objspace);
    }
    /* otherwise: nothing to record */
}
2014-09-08 08:11:00 +04:00
/*
 * Shade (WB-unprotect) `obj`: mark it as an object whose references may be
 * mutated without write barriers.  An old object being shaded must be
 * demoted and remembered so the GC keeps scanning it conservatively.
 */
void
rb_gc_writebarrier_unprotect(VALUE obj)
{
    rb_objspace_t *objspace;

    if (RVALUE_WB_UNPROTECTED(obj)) return; /* already shaded; nothing to do */

    objspace = &rb_objspace;
    gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
              rgengc_remembered(objspace, obj) ? " (already remembered)" : "");

    if (RVALUE_OLD_P(obj)) {
        gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
        RVALUE_DEMOTE(objspace, obj);
        gc_mark_set(objspace, obj);
        gc_remember_unprotected(objspace, obj);

#if RGENGC_PROFILE
        objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif /* RGENGC_PROFILE >= 2 */
#endif /* RGENGC_PROFILE */
    }
    else {
        /* young object: just restart its age */
        RVALUE_AGE_RESET(obj);
    }

    RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
    MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
}
2014-09-08 08:11:00 +04:00
/*
* remember ` obj ' if needed .
*/
mjit_compile.c: merge initial JIT compiler
which has been developed by Takashi Kokubun <takashikkbn@gmail> as
YARV-MJIT. Many of its bugs are fixed by wanabe <s.wanabe@gmail.com>.
This JIT compiler is designed to be a safe migration path to introduce
JIT compiler to MRI. So this commit does not include any bytecode
changes or dynamic instruction modifications, which are done in original
MJIT.
This commit even strips off some aggressive optimizations from
YARV-MJIT, and thus it's slower than YARV-MJIT too. But it's still
fairly faster than Ruby 2.5 in some benchmarks (attached below).
Note that this JIT compiler passes `make test`, `make test-all`, `make
test-spec` without JIT, and even with JIT. Not only it's perfectly safe
with JIT disabled because it does not replace VM instructions unlike
MJIT, but also with JIT enabled it stably runs Ruby applications
including Rails applications.
I'm expecting this version as just "initial" JIT compiler. I have many
optimization ideas which are skipped for initial merging, and you may
easily replace this JIT compiler with a faster one by just replacing
mjit_compile.c. `mjit_compile` interface is designed for the purpose.
common.mk: update dependencies for mjit_compile.c.
internal.h: declare `rb_vm_insn_addr2insn` for MJIT.
vm.c: exclude some definitions if `-DMJIT_HEADER` is provided to
compiler. This avoids to include some functions which take a long time
to compile, e.g. vm_exec_core. Some of the purpose is achieved in
transform_mjit_header.rb (see `IGNORED_FUNCTIONS`) but others are
manually resolved for now. Load mjit_helper.h for MJIT header.
mjit_helper.h: New. This is a file used only by JIT-ed code. I'll
refactor `mjit_call_cfunc` later.
vm_eval.c: add some #ifdef switches to skip compiling some functions
like Init_vm_eval.
win32/mkexports.rb: export thread/ec functions, which are used by MJIT.
include/ruby/defines.h: add MJIT_FUNC_EXPORTED macro alias to clarify
that a function is exported only for MJIT.
array.c: export a function used by MJIT.
bignum.c: ditto.
class.c: ditto.
compile.c: ditto.
error.c: ditto.
gc.c: ditto.
hash.c: ditto.
iseq.c: ditto.
numeric.c: ditto.
object.c: ditto.
proc.c: ditto.
re.c: ditto.
st.c: ditto.
string.c: ditto.
thread.c: ditto.
variable.c: ditto.
vm_backtrace.c: ditto.
vm_insnhelper.c: ditto.
vm_method.c: ditto.
I would like to improve maintainability of function exports, but I
believe this way is acceptable as initial merging if we clarify the
new exports are for MJIT (so that we can use them as TODO list to fix)
and add unit tests to detect unresolved symbols.
I'll add unit tests of JIT compilations in succeeding commits.
Author: Takashi Kokubun <takashikkbn@gmail.com>
Contributor: wanabe <s.wanabe@gmail.com>
Part of [Feature #14235]
---
* Known issues
* Code generated by gcc is faster than clang. The benchmark may be worse
in macOS. Following benchmark result is provided by gcc w/ Linux.
* Performance is decreased when Google Chrome is running
* JIT can work on MinGW, but it doesn't improve performance at least
in short running benchmark.
* Currently it doesn't perform well with Rails. We'll try to fix this
before release.
---
* Benchmark results
Benchmarked with:
Intel 4.0GHz i7-4790K with 16GB memory under x86-64 Ubuntu 8 Cores
- 2.0.0-p0: Ruby 2.0.0-p0
- r62186: Ruby trunk (early 2.6.0), before MJIT changes
- JIT off: On this commit, but without `--jit` option
- JIT on: On this commit, and with `--jit` option
** Optcarrot fps
Benchmark: https://github.com/mame/optcarrot
| |2.0.0-p0 |r62186 |JIT off |JIT on |
|:--------|:--------|:--------|:--------|:--------|
|fps |37.32 |51.46 |51.31 |58.88 |
|vs 2.0.0 |1.00x |1.38x |1.37x |1.58x |
** MJIT benchmarks
Benchmark: https://github.com/benchmark-driver/mjit-benchmarks
(Original: https://github.com/vnmakarov/ruby/tree/rtl_mjit_branch/MJIT-benchmarks)
| |2.0.0-p0 |r62186 |JIT off |JIT on |
|:----------|:--------|:--------|:--------|:--------|
|aread |1.00 |1.09 |1.07 |2.19 |
|aref |1.00 |1.13 |1.11 |2.22 |
|aset |1.00 |1.50 |1.45 |2.64 |
|awrite |1.00 |1.17 |1.13 |2.20 |
|call |1.00 |1.29 |1.26 |2.02 |
|const2 |1.00 |1.10 |1.10 |2.19 |
|const |1.00 |1.11 |1.10 |2.19 |
|fannk |1.00 |1.04 |1.02 |1.00 |
|fib |1.00 |1.32 |1.31 |1.84 |
|ivread |1.00 |1.13 |1.12 |2.43 |
|ivwrite |1.00 |1.23 |1.21 |2.40 |
|mandelbrot |1.00 |1.13 |1.16 |1.28 |
|meteor |1.00 |2.97 |2.92 |3.17 |
|nbody |1.00 |1.17 |1.15 |1.49 |
|nest-ntimes|1.00 |1.22 |1.20 |1.39 |
|nest-while |1.00 |1.10 |1.10 |1.37 |
|norm |1.00 |1.18 |1.16 |1.24 |
|nsvb |1.00 |1.16 |1.16 |1.17 |
|red-black |1.00 |1.02 |0.99 |1.12 |
|sieve |1.00 |1.30 |1.28 |1.62 |
|trees |1.00 |1.14 |1.13 |1.19 |
|while |1.00 |1.12 |1.11 |2.41 |
** Discourse's script/bench.rb
Benchmark: https://github.com/discourse/discourse/blob/v1.8.7/script/bench.rb
NOTE: Rails performance was somehow a little degraded with JIT for now.
We should fix this.
(At least I know opt_aref is performing badly in JIT and I have an idea
to fix it. Please wait for the fix.)
*** JIT off
Your Results: (note for timings- percentile is first, duration is second in millisecs)
categories_admin:
50: 17
75: 18
90: 22
99: 29
home_admin:
50: 21
75: 21
90: 27
99: 40
topic_admin:
50: 17
75: 18
90: 22
99: 32
categories:
50: 35
75: 41
90: 43
99: 77
home:
50: 39
75: 46
90: 49
99: 95
topic:
50: 46
75: 52
90: 56
99: 101
*** JIT on
Your Results: (note for timings- percentile is first, duration is second in millisecs)
categories_admin:
50: 19
75: 21
90: 25
99: 33
home_admin:
50: 24
75: 26
90: 30
99: 35
topic_admin:
50: 19
75: 20
90: 25
99: 30
categories:
50: 40
75: 44
90: 48
99: 76
home:
50: 42
75: 48
90: 51
99: 89
topic:
50: 49
75: 55
90: 58
99: 99
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62197 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 14:22:28 +03:00
MJIT_FUNC_EXPORTED void
2014-09-08 08:11:00 +04:00
rb_gc_writebarrier_remember ( VALUE obj )
2013-07-19 10:11:09 +04:00
{
rb_objspace_t * objspace = & rb_objspace ;
2014-07-25 08:47:48 +04:00
2014-09-08 08:11:00 +04:00
gc_report ( 1 , objspace , " rb_gc_writebarrier_remember: %s \n " , obj_info ( obj ) ) ;
2014-07-25 08:47:48 +04:00
2014-09-08 08:11:00 +04:00
if ( is_incremental_marking ( objspace ) ) {
if ( RVALUE_BLACK_P ( obj ) ) {
gc_grey ( objspace , obj ) ;
}
}
else {
if ( RVALUE_OLD_P ( obj ) ) {
rgengc_remember ( objspace , obj ) ;
}
}
2013-07-19 10:11:09 +04:00
}
2013-07-24 13:42:43 +04:00
static st_table * rgengc_unprotect_logging_table ;
static int
2014-05-07 19:43:37 +04:00
rgengc_unprotect_logging_exit_func_i ( st_data_t key , st_data_t val , st_data_t arg )
2013-07-24 13:42:43 +04:00
{
fprintf ( stderr , " %s \t %d \n " , ( char * ) key , ( int ) val ) ;
return ST_CONTINUE ;
}
/* atexit handler: print the accumulated WB-unprotect log. */
static void
rgengc_unprotect_logging_exit_func(void)
{
    st_foreach(rgengc_unprotect_logging_table,
               rgengc_unprotect_logging_exit_func_i, 0);
}
void
rb_gc_unprotect_logging ( void * objptr , const char * filename , int line )
{
VALUE obj = ( VALUE ) objptr ;
if ( rgengc_unprotect_logging_table = = 0 ) {
rgengc_unprotect_logging_table = st_init_strtable ( ) ;
atexit ( rgengc_unprotect_logging_exit_func ) ;
}
2014-09-08 08:11:00 +04:00
if ( RVALUE_WB_UNPROTECTED ( obj ) = = 0 ) {
2013-07-24 13:42:43 +04:00
char buff [ 0x100 ] ;
st_data_t cnt = 1 ;
char * ptr = buff ;
2014-09-08 08:11:00 +04:00
snprintf ( ptr , 0x100 - 1 , " %s|%s:%d " , obj_info ( obj ) , filename , line ) ;
2013-07-24 13:42:43 +04:00
if ( st_lookup ( rgengc_unprotect_logging_table , ( st_data_t ) ptr , & cnt ) ) {
cnt + + ;
}
else {
2016-12-26 12:28:46 +03:00
ptr = ( strdup ) ( buff ) ;
2016-05-08 20:52:38 +03:00
if ( ! ptr ) rb_memerror ( ) ;
2013-07-24 13:42:43 +04:00
}
st_insert ( rgengc_unprotect_logging_table , ( st_data_t ) ptr , cnt ) ;
}
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif /* USE_RGENGC */
2014-09-08 08:11:00 +04:00
/*
 * Propagate the WB-unprotected attribute from `obj` to its copy `dest`.
 * If `dest` is already old it must be demoted; otherwise it is shaded
 * directly and its age reset.
 */
void
rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
{
#if USE_RGENGC
    rb_objspace_t *objspace = &rb_objspace;

    if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
        if (RVALUE_OLD_P(dest)) {
            RVALUE_DEMOTE(objspace, dest);
        }
        else {
            MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
            RVALUE_AGE_RESET_RAW(dest);
        }
    }

    check_rvalue_consistency(dest);
#endif
}
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticket about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
/* RGENGC analysis information */
/* Return Qtrue iff `obj` is write-barrier protected (RGENGC analysis API).
 * Without RGENGC nothing is WB-protected, so always Qfalse. */
VALUE
rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
{
#if USE_RGENGC
    if (RVALUE_WB_UNPROTECTED(obj)) {
        return Qfalse;
    }
    return Qtrue;
#else
    return Qfalse;
#endif
}
/* Return Qtrue iff `obj` has been promoted to the old generation. */
VALUE
rb_obj_rgengc_promoted_p(VALUE obj)
{
    if (OBJ_PROMOTED(obj)) {
        return Qtrue;
    }
    return Qfalse;
}
2013-12-10 06:26:09 +04:00
/*
 * Fill `flags` (capacity `max`) with interned symbols describing the GC
 * state of `obj` (wb_protected / old / uncollectible / marking / marked).
 * Returns the number of symbols written.  The IDs are interned lazily on
 * first call and cached in function-static storage.
 */
size_t
rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
{
    size_t n = 0;
    static ID ID_marked;
#if USE_RGENGC
    static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible;
#endif

    if (!ID_marked) {
#define I(s) ID_##s = rb_intern(#s);
        I(marked);
#if USE_RGENGC
        I(wb_protected);
        I(old);
        I(marking);
        I(uncollectible);
#endif
#undef I
    }

#if USE_RGENGC
    if (RVALUE_WB_UNPROTECTED(obj) == 0 && n < max) flags[n++] = ID_wb_protected;
    if (RVALUE_OLD_P(obj) && n < max) flags[n++] = ID_old;
    if (RVALUE_UNCOLLECTIBLE(obj) && n < max) flags[n++] = ID_uncollectible;
    if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n < max) flags[n++] = ID_marking;
#endif
    if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n < max) flags[n++] = ID_marked;

    return n;
}
2012-08-05 14:39:37 +04:00
/* GC */
void
2014-09-08 08:11:00 +04:00
rb_gc_force_recycle ( VALUE obj )
2012-08-05 14:39:37 +04:00
{
rb_objspace_t * objspace = & rb_objspace ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
int is_old = RVALUE_OLD_P ( obj ) ;
gc_report ( 2 , objspace , " rb_gc_force_recycle: %s \n " , obj_info ( obj ) ) ;
2014-06-03 07:55:04 +04:00
if ( is_old ) {
2014-09-08 08:11:00 +04:00
if ( RVALUE_MARKED ( obj ) ) {
2014-09-10 06:35:17 +04:00
objspace - > rgengc . old_objects - - ;
2014-09-08 08:11:00 +04:00
}
2014-06-03 07:55:04 +04:00
}
2015-03-18 21:02:13 +03:00
CLEAR_IN_BITMAP ( GET_HEAP_UNCOLLECTIBLE_BITS ( obj ) , obj ) ;
2014-09-08 08:11:00 +04:00
CLEAR_IN_BITMAP ( GET_HEAP_WB_UNPROTECTED_BITS ( obj ) , obj ) ;
# if GC_ENABLE_INCREMENTAL_MARK
if ( is_incremental_marking ( objspace ) ) {
if ( MARKED_IN_BITMAP ( GET_HEAP_MARKING_BITS ( obj ) , obj ) ) {
invalidate_mark_stack ( & objspace - > mark_stack , obj ) ;
CLEAR_IN_BITMAP ( GET_HEAP_MARKING_BITS ( obj ) , obj ) ;
}
CLEAR_IN_BITMAP ( GET_HEAP_MARK_BITS ( obj ) , obj ) ;
2014-06-03 07:55:04 +04:00
}
2014-09-08 08:11:00 +04:00
else {
2014-06-03 07:55:04 +04:00
# endif
2014-09-08 08:11:00 +04:00
if ( is_old | | ! GET_HEAP_PAGE ( obj ) - > flags . before_sweep ) {
CLEAR_IN_BITMAP ( GET_HEAP_MARK_BITS ( obj ) , obj ) ;
}
CLEAR_IN_BITMAP ( GET_HEAP_MARKING_BITS ( obj ) , obj ) ;
# if GC_ENABLE_INCREMENTAL_MARK
2013-06-25 20:20:39 +04:00
}
2014-09-08 08:11:00 +04:00
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif
2014-09-09 14:01:18 +04:00
objspace - > profile . total_freed_objects + + ;
2014-06-03 07:55:04 +04:00
2014-09-08 08:11:00 +04:00
heap_page_add_freeobj ( objspace , GET_HEAP_PAGE ( obj ) , obj ) ;
2013-07-16 12:32:32 +04:00
2013-11-23 03:50:08 +04:00
/* Disable counting swept_slots because there are no meaning.
2013-10-22 14:28:31 +04:00
* if ( ! MARKED_IN_BITMAP ( GET_HEAP_MARK_BITS ( p ) , p ) ) {
2013-11-23 03:50:08 +04:00
* objspace - > heap . swept_slots + + ;
2013-10-22 14:28:31 +04:00
* }
*/
2012-08-05 14:39:37 +04:00
}
2014-07-06 08:42:35 +04:00
# ifndef MARK_OBJECT_ARY_BUCKET_SIZE
# define MARK_OBJECT_ARY_BUCKET_SIZE 1024
# endif
2012-08-05 14:39:37 +04:00
void
rb_gc_register_mark_object ( VALUE obj )
{
2017-11-07 08:09:27 +03:00
VALUE ary_ary = GET_VM ( ) - > mark_object_ary ;
2014-07-06 08:42:35 +04:00
VALUE ary = rb_ary_last ( 0 , 0 , ary_ary ) ;
if ( ary = = Qnil | | RARRAY_LEN ( ary ) > = MARK_OBJECT_ARY_BUCKET_SIZE ) {
ary = rb_ary_tmp_new ( MARK_OBJECT_ARY_BUCKET_SIZE ) ;
rb_ary_push ( ary_ary , ary ) ;
}
2012-08-05 14:39:37 +04:00
rb_ary_push ( ary , obj ) ;
}
/*
 * Register the C global variable at `addr` as a GC root: prepend it to
 * the objspace's global_list so the VALUE it holds is always marked.
 */
void
rb_gc_register_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp = ALLOC(struct gc_list);

    tmp->next = global_list;
    tmp->varptr = addr;
    global_list = tmp;
}
/*
 * Remove the GC root registered for `addr` from global_list, if present.
 * Fix: the original dereferenced global_list unconditionally, crashing
 * when called with an empty list (no address ever registered); an
 * unknown `addr` is silently ignored, matching the original loop's
 * behavior.
 */
void
rb_gc_unregister_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp = global_list;

    if (tmp == NULL) return; /* nothing registered; avoid NULL deref */

    if (tmp->varptr == addr) {
        global_list = tmp->next;
        xfree(tmp);
        return;
    }
    while (tmp->next) {
        if (tmp->next->varptr == addr) {
            struct gc_list *t = tmp->next;

            tmp->next = tmp->next->next;
            xfree(t);
            break;
        }
        tmp = tmp->next;
    }
}
2013-07-17 12:25:11 +04:00
/* Public alias of rb_gc_register_address(): mark the C global at `var`
 * as a GC root. */
void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}
2012-08-05 14:39:37 +04:00
# define GC_NOTIFY 0
2014-04-21 04:39:43 +04:00
/* Bit positions interpreted when GC.stress is set to a Fixnum. */
enum {
    gc_stress_no_major,               /* bit 0: do not force major (full-mark) GC */
    gc_stress_no_immediate_sweep,     /* bit 1: use lazy sweep instead of immediate sweep */
    gc_stress_full_mark_after_malloc, /* bit 2: force a full mark on malloc-triggered GC */
    gc_stress_max
};

/* True when the stress mode requests a full mark after every malloc. */
#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
2014-04-22 01:54:17 +04:00
2014-09-09 08:12:14 +04:00
static void
2014-09-08 08:11:00 +04:00
heap_ready_to_gc ( rb_objspace_t * objspace , rb_heap_t * heap )
{
2014-09-09 08:12:14 +04:00
if ( ! heap - > freelist & & ! heap - > free_pages ) {
if ( ! heap_increment ( objspace , heap ) ) {
heap_set_increment ( objspace , 1 ) ;
heap_increment ( objspace , heap ) ;
2014-09-08 08:11:00 +04:00
}
}
}
/*
 * Report whether a GC may start now.  When GC is disabled or already
 * running, returns FALSE after making sure the eden heap can still
 * serve allocations without collecting.
 */
static int
ready_to_gc(rb_objspace_t *objspace)
{
    if (!dont_gc && !during_gc && !ruby_disable_gc) {
        return TRUE;
    }

    /* GC cannot run: grow eden instead so allocation may proceed. */
    heap_ready_to_gc(objspace, heap_eden);
    return FALSE;
}
/*
 * Recompute the malloc-driven GC trigger thresholds at the end of a
 * cycle: adapt malloc_limit to the bytes malloc'ed since the last GC,
 * and (with RGENGC_ESTIMATE_OLDMALLOC) adjust the old-generation
 * malloc threshold that can promote the next GC to a major one.
 */
static void
gc_reset_malloc_info(rb_objspace_t *objspace)
{
    gc_prof_set_malloc_info(objspace);
    {
        /* Atomically consume the bytes allocated since the last GC. */
        size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
        size_t old_limit = malloc_limit;

        if (inc > malloc_limit) {
            /* Allocation outpaced the limit: grow it (clamped to max). */
            malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
            if (malloc_limit > gc_params.malloc_limit_max) {
                malloc_limit = gc_params.malloc_limit_max;
            }
        }
        else {
            /* Under the limit: decay it slowly (clamped to min). */
            malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
            if (malloc_limit < gc_params.malloc_limit_min) {
                malloc_limit = gc_params.malloc_limit_min;
            }
        }

        if (0) { /* debug trace, disabled */
            if (old_limit != malloc_limit) {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
                        rb_gc_count(), old_limit, malloc_limit);
            }
            else {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
                        rb_gc_count(), malloc_limit);
            }
        }
    }

    /* reset oldmalloc info */
#if RGENGC_ESTIMATE_OLDMALLOC
    if (!is_full_marking(objspace)) {
        /* Minor GC: if old-gen malloc growth crossed its limit, request
         * a major GC and raise the limit (clamped to max). */
        if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
            objspace->rgengc.oldmalloc_increase_limit =
              (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
            if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
            }
        }

        if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n", /* debug trace, disabled */
                       (int)rb_gc_count(),
                       (int)objspace->rgengc.need_major_gc,
                       (unsigned int)objspace->rgengc.oldmalloc_increase,
                       (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
                       (unsigned int)gc_params.oldmalloc_limit_max);
    }
    else {
        /* major GC */
        objspace->rgengc.oldmalloc_increase = 0;

        /* Unless this major GC was itself caused by oldmalloc pressure,
         * shrink the limit back toward its minimum. */
        if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
            objspace->rgengc.oldmalloc_increase_limit =
              (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
            if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
            }
        }
    }
#endif
}
/*
 * Run a blocking garbage collection.  reason is a GPR_FLAG_* bitmask
 * (full mark / immediate mark / immediate sweep / trigger source).
 * Any in-progress incremental marking or lazy sweeping is finished
 * first so the new cycle starts from a clean state.  Returns the
 * result of gc_start (TRUE unless the heap was not ready).
 */
static int
garbage_collect(rb_objspace_t *objspace, int reason)
{
#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time();
#endif

    /* Finish leftover incremental marking / lazy sweeping. */
    gc_rest(objspace);

#if GC_PROFILE_MORE_DETAIL
    /* prepare_time now holds the elapsed time spent in gc_rest. */
    objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif

    return gc_start(objspace, reason);
}
/*
 * Start one GC cycle.  reason is a GPR_FLAG_* bitmask requesting
 * full/minor mark, immediate/incremental mark, immediate/lazy sweep,
 * and recording what triggered the GC.  Returns FALSE if the heap is
 * not ready, TRUE otherwise (including when GC is currently not
 * allowed).
 */
static int
gc_start(rb_objspace_t *objspace, int reason)
{
    unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
    unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;

    /* reason may be clobbered, later, so keep set immediate_sweep here */
    objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);

    if (!heap_allocated_pages) return FALSE; /* heap is not ready */
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */

    /* A new cycle must not begin while another one is in progress. */
    GC_ASSERT(gc_mode(objspace) == gc_mode_none);
    GC_ASSERT(!is_lazy_sweeping(heap_eden));
    GC_ASSERT(!is_incremental_marking(objspace));
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(Qnil);
#endif

    gc_enter(objspace, "gc_start");

    if (ruby_gc_stressful) {
        /* GC.stress: a Fixnum value carries gc_stress_* option bits. */
        int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;

        if ((flag & (1<<gc_stress_no_major)) == 0) {
            do_full_mark = TRUE;
        }

        objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
    }
    else {
#if USE_RGENGC
        /* Honor a major-GC request accumulated since the last cycle. */
        if (objspace->rgengc.need_major_gc) {
            reason |= objspace->rgengc.need_major_gc;
            do_full_mark = TRUE;
        }
        else if (RGENGC_FORCE_MAJOR_GC) {
            reason = GPR_FLAG_MAJOR_BY_FORCE;
            do_full_mark = TRUE;
        }

        objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
#endif
    }

    if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
        reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
    }

#if GC_ENABLE_INCREMENTAL_MARK
    /* Incremental marking is used only for full marks, and only when
     * neither the caller nor configuration demanded immediate marking. */
    if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
        objspace->flags.during_incremental_marking = FALSE;
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }
#endif

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;
    }

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
              reason,
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

#if USE_DEBUG_COUNTER
    /* Classify this GC for the debug counters. */
    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
#endif
    }
    else {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
    }
#endif

    /* Record bookkeeping for GC.stat / GC.latest_gc_info / profiling. */
    objspace->profile.count++;
    objspace->profile.latest_gc_info = reason;
    objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
    objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace);
    rb_transient_heap_start_marking(do_full_mark);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
    GC_ASSERT(during_gc);

    gc_prof_timer_start(objspace);
    {
        gc_marks(objspace, do_full_mark);
    }
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, "gc_start");
    return TRUE;
}
2014-09-08 08:11:00 +04:00
/*
 * Synchronously finish any in-progress incremental marking and lazy
 * sweeping so the collector returns to the idle state.
 */
static void
gc_rest(rb_objspace_t *objspace)
{
    /* Fast path: nothing pending. */
    if (!is_incremental_marking(objspace) && !is_lazy_sweeping(heap_eden)) {
        return;
    }

    gc_enter(objspace, "gc_rest");

    if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(Qnil);

    /* Marking must be driven to completion before sweeping. */
    if (is_incremental_marking(objspace)) {
        PUSH_MARK_FUNC_DATA(NULL);
        gc_marks_rest(objspace);
        POP_MARK_FUNC_DATA();
    }
    if (is_lazy_sweeping(heap_eden)) {
        gc_sweep_rest(objspace);
    }

    gc_exit(objspace, "gc_rest");
}
2014-09-08 08:11:00 +04:00
/* Bundles the arguments of garbage_collect() into one void*-passable
 * struct for re-entering the GVL via rb_thread_call_with_gvl(). */
struct objspace_and_reason {
    rb_objspace_t *objspace;
    int reason;    /* GPR_FLAG_* bitmask */
};
2013-10-22 14:28:31 +04:00
2014-09-08 08:11:00 +04:00
/*
 * Write a short phase code into buff: "M" marking ("F" full, "I"
 * incremental appended), "S" sweeping ("L" lazy appended), or "N"
 * when neither.  buff must hold at least 4 bytes.
 */
static void
gc_current_status_fill(rb_objspace_t *objspace, char *buff)
{
    char *p = buff;

    if (is_marking(objspace)) {
        *p++ = 'M';
#if USE_RGENGC
        if (is_full_marking(objspace)) *p++ = 'F';
#if GC_ENABLE_INCREMENTAL_MARK
        if (is_incremental_marking(objspace)) *p++ = 'I';
#endif
#endif
    }
    else if (is_sweeping(objspace)) {
        *p++ = 'S';
        if (is_lazy_sweeping(heap_eden)) *p++ = 'L';
    }
    else {
        *p++ = 'N';
    }
    *p = '\0';
}
2013-05-22 03:09:22 +04:00
2014-09-08 08:11:00 +04:00
/*
 * Return the current GC phase string (see gc_current_status_fill).
 * NOTE: returns a pointer to a static buffer, so the result is
 * overwritten by the next call; not reentrant.
 */
static const char *
gc_current_status(rb_objspace_t *objspace)
{
    static char buff[0x10];
    gc_current_status_fill(objspace, buff);
    return buff;
}
#if PRINT_ENTER_EXIT_TICK

/* Debug instrumentation: trace each gc_enter/gc_exit pair to stderr
 * with mutator time, GC time, event name, phase transition, and a
 * '+'/'-' marker for major/minor GC. */
static tick_t last_exit_tick;          /* tick at the previous gc_exit */
static tick_t enter_tick;              /* tick at the matching gc_enter */
static int enter_count = 0;
static char last_gc_status[0x10];      /* phase string captured at enter */

static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_count++;
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);
#if 1
        /* [last mutator time] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;
#else
        /* [enter_tick] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
#endif
    }
}
#else /* PRINT_ENTER_EXIT_TICK */
/* Instrumentation disabled: keep the call sites valid with a no-op. */
static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    /* null */
}
#endif /* PRINT_ENTER_EXIT_TICK */
2013-05-22 03:09:22 +04:00
2014-09-08 08:11:00 +04:00
/*
 * Mark the beginning of a GC phase named by event ("gc_start",
 * "gc_rest", ...).  Pairs with gc_exit().  The MJIT hook is invoked
 * before during_gc is raised so JIT compilation and GC do not run
 * concurrently.
 */
static inline void
gc_enter(rb_objspace_t *objspace, const char *event)
{
    GC_ASSERT(during_gc == 0);    /* gc_enter must not nest */
    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(Qnil);

    /* Block until the MJIT worker is at a GC-safe point. */
    mjit_gc_start_hook();

    during_gc = TRUE;
    gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
    gc_record(objspace, 0, event);
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}
2014-09-08 08:11:00 +04:00
/*
 * Mark the end of a GC phase started by gc_enter() with the same
 * event name.  Mirrors gc_enter in reverse order: hooks and tracing
 * run first, during_gc is lowered, then the MJIT worker is released.
 */
static inline void
gc_exit(rb_objspace_t *objspace, const char *event)
{
    GC_ASSERT(during_gc != 0);    /* must be balanced with gc_enter */

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
    gc_record(objspace, 1, event);
    gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
    during_gc = FALSE;

    /* Allow the MJIT worker to resume. */
    mjit_gc_finish_hook();
}
2013-05-21 12:19:07 +04:00
2012-08-05 14:39:37 +04:00
static void *
gc_with_gvl ( void * ptr )
{
2013-05-21 12:19:07 +04:00
struct objspace_and_reason * oar = ( struct objspace_and_reason * ) ptr ;
gc.c: reduce parameters for gc_start and garbage_collect
Every time I look at gc.c, I get confused by argument ordering:
gc_start(..., TRUE, TRUE, FALSE, ...)
gc_start(..., FALSE, FALSE, FALSE, ... )
While we do not have kwargs in C, we can use flags to improve readability:
gc_start(...,
GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | ...)
[ruby-core:87311] [Misc #14798]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63575 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-06-05 23:16:21 +03:00
return ( void * ) ( VALUE ) garbage_collect ( oar - > objspace , oar - > reason ) ;
2012-08-05 14:39:37 +04:00
}
static int
gc.c: reduce parameters for gc_start and garbage_collect
Every time I look at gc.c, I get confused by argument ordering:
gc_start(..., TRUE, TRUE, FALSE, ...)
gc_start(..., FALSE, FALSE, FALSE, ... )
While we do not have kwargs in C, we can use flags to improve readability:
gc_start(...,
GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | ...)
[ruby-core:87311] [Misc #14798]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63575 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-06-05 23:16:21 +03:00
garbage_collect_with_gvl ( rb_objspace_t * objspace , int reason )
2012-08-05 14:39:37 +04:00
{
if ( dont_gc ) return TRUE ;
if ( ruby_thread_has_gvl_p ( ) ) {
gc.c: reduce parameters for gc_start and garbage_collect
Every time I look at gc.c, I get confused by argument ordering:
gc_start(..., TRUE, TRUE, FALSE, ...)
gc_start(..., FALSE, FALSE, FALSE, ... )
While we do not have kwargs in C, we can use flags to improve readability:
gc_start(...,
GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | ...)
[ruby-core:87311] [Misc #14798]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63575 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-06-05 23:16:21 +03:00
return garbage_collect ( objspace , reason ) ;
2012-08-05 14:39:37 +04:00
}
else {
if ( ruby_native_thread_p ( ) ) {
2013-05-21 12:19:07 +04:00
struct objspace_and_reason oar ;
oar . objspace = objspace ;
oar . reason = reason ;
return ( int ) ( VALUE ) rb_thread_call_with_gvl ( gc_with_gvl , ( void * ) & oar ) ;
2012-08-05 14:39:37 +04:00
}
else {
/* no ruby thread */
fprintf ( stderr , " [FATAL] failed to allocate memory \n " ) ;
exit ( EXIT_FAILURE ) ;
}
}
}
# undef Init_stack
2009-11-01 02:17:52 +03:00
2012-08-05 14:39:37 +04:00
/* Backwards-compatible entry point: older extensions call Init_stack()
 * directly; it simply records the native stack base via
 * ruby_init_stack(). */
void
Init_stack(volatile VALUE *addr)
{
    ruby_init_stack(addr);
}
2003-12-22 09:20:14 +03:00
/*
* call - seq :
2010-05-18 01:07:33 +04:00
* GC . start - > nil
2016-09-22 07:06:53 +03:00
* ObjectSpace . garbage_collect - > nil
* include GC ; garbage_collect - > nil
2014-03-21 11:33:03 +04:00
* GC . start ( full_mark : true , immediate_sweep : true ) - > nil
2016-09-20 17:02:53 +03:00
* ObjectSpace . garbage_collect ( full_mark : true , immediate_sweep : true ) - > nil
* include GC ; garbage_collect ( full_mark : true , immediate_sweep : true ) - > nil
2003-12-22 09:20:14 +03:00
*
* Initiates garbage collection , unless manually disabled .
2005-06-19 21:16:14 +04:00
*
2013-12-06 09:11:51 +04:00
* This method is defined with keyword arguments that default to true :
*
2014-03-21 11:33:03 +04:00
* def GC . start ( full_mark : true , immediate_sweep : true ) ; end
2013-12-06 09:11:51 +04:00
*
* Use full_mark : false to perform a minor GC .
* Use immediate_sweep : false to defer sweeping ( use lazy sweep ) .
2013-12-06 09:32:11 +04:00
*
* Note : These keyword arguments are implementation and version dependent . They
* are not guaranteed to be future - compatible , and may be ignored if the
* underlying implementation does not support them .
2003-12-22 09:20:14 +03:00
*/
2013-12-06 09:11:51 +04:00
/*
 * Implementation of GC.start / ObjectSpace.garbage_collect.
 *
 * Starts from a full, immediate collection triggered by GPR_FLAG_METHOD,
 * then clears individual flags for any of the keyword arguments
 * :full_mark, :immediate_mark, :immediate_sweep passed as false
 * (each defaults to true).  Runs deferred finalizers afterwards.
 * Always returns nil.
 */
static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
                 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_METHOD;
    VALUE opt = Qnil;
    static ID keyword_ids[3];

    rb_scan_args(argc, argv, "0:", &opt);

    if (!NIL_P(opt)) {
        VALUE kwvals[3];

        /* intern keyword IDs lazily, once */
        if (!keyword_ids[0]) {
            keyword_ids[0] = rb_intern("full_mark");
            keyword_ids[1] = rb_intern("immediate_mark");
            keyword_ids[2] = rb_intern("immediate_sweep");
        }

        rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);

        /* Qundef means "keyword not given": keep the default (flag set). */
        if (kwvals[0] != Qundef && !RTEST(kwvals[0])) {
            reason &= ~GPR_FLAG_FULL_MARK;
        }
        if (kwvals[1] != Qundef && !RTEST(kwvals[1])) {
            reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        }
        if (kwvals[2] != Qundef && !RTEST(kwvals[2])) {
            reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
        }
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
2001-01-29 08:10:42 +03:00
VALUE
* array.c: moved to ANSI function style from K&R function style.
(used protoize on windows, so still K&R remains on #ifdef part of
other platforms. And `foo _((boo))' stuff is still there)
[ruby-dev:26975]
* bignum.c, class.c, compar.c, dir.c, dln.c, dmyext.c, enum.c,
enumerator.c, error.c, eval.c, file.c, gc.c, hash.c, inits.c,
io.c, main.c, marshal.c, math.c, numeric.c, object.c, pack.c,
prec.c, process.c, random.c, range.c, re.c, regcomp.c, regenc.c,
regerror.c, regexec.c, regparse.c, regparse.h, ruby.c, signal.c,
sprintf.c, st.c, string.c, struct.c, time.c, util.h, variable.c,
version.c: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@9126 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2005-09-12 14:44:21 +04:00
rb_gc_start ( void )
1998-01-16 15:13:05 +03:00
{
1999-01-20 07:59:39 +03:00
rb_gc ( ) ;
1998-01-16 15:13:05 +03:00
return Qnil ;
}
void
2012-08-05 14:39:37 +04:00
rb_gc ( void )
1998-01-16 15:13:05 +03:00
{
2012-08-05 14:39:37 +04:00
rb_objspace_t * objspace = & rb_objspace ;
gc.c: reduce parameters for gc_start and garbage_collect
Every time I look at gc.c, I get confused by argument ordering:
gc_start(..., TRUE, TRUE, FALSE, ...)
gc_start(..., FALSE, FALSE, FALSE, ... )
While we do not have kwargs in C, we can use flags to improve readability:
gc_start(...,
GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | ...)
[ruby-core:87311] [Misc #14798]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63575 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-06-05 23:16:21 +03:00
int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI ;
garbage_collect ( objspace , reason ) ;
2016-07-22 00:12:53 +03:00
gc_finalize_deferred ( objspace ) ;
2012-08-05 14:39:37 +04:00
}
/*
 * Returns non-zero while the garbage collector is running.
 * NOTE(review): `during_gc` is presumably a macro that reads state through
 * the local `objspace` — the seemingly-unused local is required; confirm
 * against the macro definition earlier in this file.
 */
int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}
2013-05-26 20:43:21 +04:00
#if RGENGC_PROFILE >= 2

static const char *type_name(int type, VALUE obj);

/*
 * Store a per-object-type breakdown into `hash` under `name`:
 * builds a sub-hash mapping each type-name symbol to types[i]
 * for every value tag 0..T_MASK-1.
 */
static void
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    VALUE result = rb_hash_new_with_size(T_MASK);
    int i;
    for (i = 0; i < T_MASK; i++) {
        const char *type = type_name(i, 0);
        rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
    }
    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
}
#endif
2013-05-27 14:01:45 +04:00
size_t
rb_gc_count ( void )
{
2013-11-01 16:49:49 +04:00
return rb_objspace . profile . count ;
2013-05-27 14:01:45 +04:00
}
2003-12-22 09:20:14 +03:00
/*
 *  call-seq:
 *     GC.count -> Integer
 *
 *  The number of times GC occurred.
 *
 *  It returns the number of times GC occurred since the process started.
 */
2010-10-21 08:18:09 +04:00
static VALUE
2012-08-05 14:39:37 +04:00
gc_count ( VALUE self )
2010-10-21 08:18:09 +04:00
{
2013-05-27 14:01:45 +04:00
return SIZET2NUM ( rb_gc_count ( ) ) ;
2010-10-21 08:18:09 +04:00
}
2013-12-05 14:30:38 +04:00
static VALUE
2014-12-12 07:09:28 +03:00
gc_info_decode ( rb_objspace_t * objspace , const VALUE hash_or_key , const int orig_flags )
2013-12-05 14:30:38 +04:00
{
2014-12-12 07:09:28 +03:00
static VALUE sym_major_by = Qnil , sym_gc_by , sym_immediate_sweep , sym_have_finalizer , sym_state ;
2014-07-24 15:13:19 +04:00
static VALUE sym_nofree , sym_oldgen , sym_shady , sym_force , sym_stress ;
2013-12-16 06:50:45 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
static VALUE sym_oldmalloc ;
# endif
2013-12-05 14:30:38 +04:00
static VALUE sym_newobj , sym_malloc , sym_method , sym_capi ;
2014-12-12 07:09:28 +03:00
static VALUE sym_none , sym_marking , sym_sweeping ;
2013-12-05 14:30:38 +04:00
VALUE hash = Qnil , key = Qnil ;
2013-12-05 16:06:59 +04:00
VALUE major_by ;
2014-12-12 07:09:28 +03:00
VALUE flags = orig_flags ? orig_flags : objspace - > profile . latest_gc_info ;
2013-12-05 14:30:38 +04:00
2014-12-12 07:09:28 +03:00
if ( SYMBOL_P ( hash_or_key ) ) {
2013-12-05 14:30:38 +04:00
key = hash_or_key ;
2014-12-12 07:09:28 +03:00
}
else if ( RB_TYPE_P ( hash_or_key , T_HASH ) ) {
2013-12-05 14:30:38 +04:00
hash = hash_or_key ;
2014-12-12 07:09:28 +03:00
}
else {
2013-12-05 14:30:38 +04:00
rb_raise ( rb_eTypeError , " non-hash or symbol given " ) ;
2014-12-12 07:09:28 +03:00
}
2013-12-05 14:30:38 +04:00
if ( sym_major_by = = Qnil ) {
# define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
S ( major_by ) ;
S ( gc_by ) ;
S ( immediate_sweep ) ;
S ( have_finalizer ) ;
2014-12-12 07:09:28 +03:00
S ( state ) ;
2014-07-24 15:13:19 +04:00
S ( stress ) ;
2013-12-05 14:30:38 +04:00
S ( nofree ) ;
S ( oldgen ) ;
S ( shady ) ;
2014-07-24 15:13:19 +04:00
S ( force ) ;
2013-12-16 06:50:45 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
2013-12-05 14:30:38 +04:00
S ( oldmalloc ) ;
2013-12-16 06:50:45 +04:00
# endif
2013-12-05 14:30:38 +04:00
S ( newobj ) ;
S ( malloc ) ;
S ( method ) ;
S ( capi ) ;
2014-12-12 07:09:28 +03:00
S ( none ) ;
S ( marking ) ;
S ( sweeping ) ;
2013-12-05 14:30:38 +04:00
# undef S
}
# define SET(name, attr) \
if ( key = = sym_ # # name ) \
return ( attr ) ; \
else if ( hash ! = Qnil ) \
rb_hash_aset ( hash , sym_ # # name , ( attr ) ) ;
2013-12-05 16:49:07 +04:00
major_by =
2014-07-24 15:13:19 +04:00
( flags & GPR_FLAG_MAJOR_BY_NOFREE ) ? sym_nofree :
2013-12-05 16:06:59 +04:00
( flags & GPR_FLAG_MAJOR_BY_OLDGEN ) ? sym_oldgen :
( flags & GPR_FLAG_MAJOR_BY_SHADY ) ? sym_shady :
2014-07-24 15:13:19 +04:00
( flags & GPR_FLAG_MAJOR_BY_FORCE ) ? sym_force :
2013-12-05 14:30:38 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
2013-12-05 16:06:59 +04:00
( flags & GPR_FLAG_MAJOR_BY_OLDMALLOC ) ? sym_oldmalloc :
2013-12-05 14:30:38 +04:00
# endif
2013-12-05 16:06:59 +04:00
Qnil ;
SET ( major_by , major_by ) ;
2013-12-05 14:30:38 +04:00
SET ( gc_by ,
( flags & GPR_FLAG_NEWOBJ ) ? sym_newobj :
( flags & GPR_FLAG_MALLOC ) ? sym_malloc :
( flags & GPR_FLAG_METHOD ) ? sym_method :
( flags & GPR_FLAG_CAPI ) ? sym_capi :
( flags & GPR_FLAG_STRESS ) ? sym_stress :
Qnil
) ;
SET ( have_finalizer , ( flags & GPR_FLAG_HAVE_FINALIZE ) ? Qtrue : Qfalse ) ;
SET ( immediate_sweep , ( flags & GPR_FLAG_IMMEDIATE_SWEEP ) ? Qtrue : Qfalse ) ;
2014-12-12 07:09:28 +03:00
if ( orig_flags = = 0 ) {
2016-03-04 12:53:03 +03:00
SET ( state , gc_mode ( objspace ) = = gc_mode_none ? sym_none :
gc_mode ( objspace ) = = gc_mode_marking ? sym_marking : sym_sweeping ) ;
2014-12-12 07:09:28 +03:00
}
2013-12-05 14:30:38 +04:00
# undef SET
2014-03-31 01:55:59 +04:00
if ( ! NIL_P ( key ) ) { /* matched key should return above */
rb_raise ( rb_eArgError , " unknown key: % " PRIsVALUE , rb_sym2str ( key ) ) ;
}
2013-12-05 14:30:38 +04:00
return hash ;
}
/*
 * C API wrapper: decode the most recent GC's info.
 * `key` may be a Symbol (single attribute) or a Hash to fill.
 */
VALUE
rb_gc_latest_gc_info(VALUE key)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_info_decode(objspace, key, 0);
}
/*
* call - seq :
* GC . latest_gc_info - > { : gc_by = > : newobj }
* GC . latest_gc_info ( hash ) - > hash
* GC . latest_gc_info ( : major_by ) - > : malloc
*
* Returns information about the most recent garbage collection .
*/
/*
 * Implementation of GC.latest_gc_info.
 * With no argument a new Hash is created and filled; a Symbol argument
 * returns just that attribute; a Hash argument is filled in place.
 * Raises TypeError for any other argument type.
 */
static VALUE
gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE arg = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        arg = argv[0];
        if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }
    }
    else {
        arg = rb_hash_new();
    }

    return gc_info_decode(objspace, arg, 0);
}
2014-11-14 10:29:33 +03:00
/* Keys reported by GC.stat (current naming scheme). */
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_heap_allocated_pages,
    gc_stat_sym_heap_sorted_length,
    gc_stat_sym_heap_allocatable_pages,
    gc_stat_sym_heap_available_slots,
    gc_stat_sym_heap_live_slots,
    gc_stat_sym_heap_free_slots,
    gc_stat_sym_heap_final_slots,
    gc_stat_sym_heap_marked_slots,
    gc_stat_sym_heap_eden_pages,
    gc_stat_sym_heap_tomb_pages,
    gc_stat_sym_total_allocated_pages,
    gc_stat_sym_total_freed_pages,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_freed_objects,
    gc_stat_sym_malloc_increase_bytes,
    gc_stat_sym_malloc_increase_bytes_limit,
#if USE_RGENGC
    gc_stat_sym_minor_gc_count,
    gc_stat_sym_major_gc_count,
    gc_stat_sym_remembered_wb_unprotected_objects,
    gc_stat_sym_remembered_wb_unprotected_objects_limit,
    gc_stat_sym_old_objects,
    gc_stat_sym_old_objects_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,
#endif
#if RGENGC_PROFILE
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
#endif
#endif
    gc_stat_sym_last
};
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
2014-11-14 10:29:33 +03:00
/* Obsolete (Ruby 2.1-era) GC.stat key names, kept for compatibility. */
enum gc_stat_compat_sym {
    gc_stat_compat_sym_gc_stat_heap_used,
    gc_stat_compat_sym_heap_eden_page_length,
    gc_stat_compat_sym_heap_tomb_page_length,
    gc_stat_compat_sym_heap_increment,
    gc_stat_compat_sym_heap_length,
    gc_stat_compat_sym_heap_live_slot,
    gc_stat_compat_sym_heap_free_slot,
    gc_stat_compat_sym_heap_final_slot,
    gc_stat_compat_sym_heap_swept_slot,
#if USE_RGENGC
    gc_stat_compat_sym_remembered_shady_object,
    gc_stat_compat_sym_remembered_shady_object_limit,
    gc_stat_compat_sym_old_object,
    gc_stat_compat_sym_old_object_limit,
#endif
    gc_stat_compat_sym_total_allocated_object,
    gc_stat_compat_sym_total_freed_object,
    gc_stat_compat_sym_malloc_increase,
    gc_stat_compat_sym_malloc_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_compat_sym_oldmalloc_increase,
    gc_stat_compat_sym_oldmalloc_limit,
#endif
    gc_stat_compat_sym_last
};
2013-12-05 11:45:13 +04:00
2014-11-14 10:29:33 +03:00
static VALUE gc_stat_symbols [ gc_stat_sym_last ] ;
static VALUE gc_stat_compat_symbols [ gc_stat_compat_sym_last ] ;
static VALUE gc_stat_compat_table ;
2013-12-05 11:45:13 +04:00
2014-11-14 10:29:33 +03:00
static void
setup_gc_stat_symbols ( void )
{
if ( gc_stat_symbols [ 0 ] = = 0 ) {
# define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
S ( count ) ;
2014-09-10 06:47:05 +04:00
S ( heap_allocated_pages ) ;
2014-09-09 13:33:52 +04:00
S ( heap_sorted_length ) ;
S ( heap_allocatable_pages ) ;
2014-09-10 05:42:09 +04:00
S ( heap_available_slots ) ;
S ( heap_live_slots ) ;
S ( heap_free_slots ) ;
2014-09-09 13:53:47 +04:00
S ( heap_final_slots ) ;
2014-09-09 14:55:18 +04:00
S ( heap_marked_slots ) ;
2014-09-09 13:33:52 +04:00
S ( heap_eden_pages ) ;
S ( heap_tomb_pages ) ;
2014-09-10 06:13:41 +04:00
S ( total_allocated_pages ) ;
S ( total_freed_pages ) ;
2014-09-09 14:01:18 +04:00
S ( total_allocated_objects ) ;
S ( total_freed_objects ) ;
2014-09-10 06:53:11 +04:00
S ( malloc_increase_bytes ) ;
S ( malloc_increase_bytes_limit ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
S ( minor_gc_count ) ;
S ( major_gc_count ) ;
2014-09-10 06:35:17 +04:00
S ( remembered_wb_unprotected_objects ) ;
S ( remembered_wb_unprotected_objects_limit ) ;
S ( old_objects ) ;
S ( old_objects_limit ) ;
2013-11-24 23:49:02 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
2014-09-10 06:53:11 +04:00
S ( oldmalloc_increase_bytes ) ;
S ( oldmalloc_increase_bytes_limit ) ;
2013-11-24 23:08:33 +04:00
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if RGENGC_PROFILE
2014-09-10 02:32:09 +04:00
S ( total_generated_normal_object_count ) ;
S ( total_generated_shady_object_count ) ;
S ( total_shade_operation_count ) ;
S ( total_promoted_count ) ;
S ( total_remembered_normal_object_count ) ;
S ( total_remembered_shady_object_count ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# endif /* RGENGC_PROFILE */
2014-06-30 11:46:57 +04:00
# endif /* USE_RGENGC */
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# undef S
2014-11-14 10:29:33 +03:00
# define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
S ( gc_stat_heap_used ) ;
S ( heap_eden_page_length ) ;
S ( heap_tomb_page_length ) ;
S ( heap_increment ) ;
S ( heap_length ) ;
S ( heap_live_slot ) ;
S ( heap_free_slot ) ;
S ( heap_final_slot ) ;
S ( heap_swept_slot ) ;
# if USE_RGEGC
S ( remembered_shady_object ) ;
S ( remembered_shady_object_limit ) ;
S ( old_object ) ;
S ( old_object_limit ) ;
# endif
S ( total_allocated_object ) ;
S ( total_freed_object ) ;
S ( malloc_increase ) ;
S ( malloc_limit ) ;
# if RGENGC_ESTIMATE_OLDMALLOC
S ( oldmalloc_increase ) ;
S ( oldmalloc_limit ) ;
# endif
# undef S
{
VALUE table = gc_stat_compat_table = rb_hash_new ( ) ;
2014-11-14 10:59:55 +03:00
rb_obj_hide ( table ) ;
2014-11-14 10:29:33 +03:00
rb_gc_register_mark_object ( table ) ;
/* compatibility layer for Ruby 2.1 */
# define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
# define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
rb_hash_aset ( table , OLD_SYM ( gc_stat_heap_used ) , NEW_SYM ( heap_allocated_pages ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_eden_page_length ) , NEW_SYM ( heap_eden_pages ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_tomb_page_length ) , NEW_SYM ( heap_tomb_pages ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_increment ) , NEW_SYM ( heap_allocatable_pages ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_length ) , NEW_SYM ( heap_sorted_length ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_live_slot ) , NEW_SYM ( heap_live_slots ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_free_slot ) , NEW_SYM ( heap_free_slots ) ) ;
rb_hash_aset ( table , OLD_SYM ( heap_final_slot ) , NEW_SYM ( heap_final_slots ) ) ;
# if USE_RGEGC
rb_hash_aset ( table , OLD_SYM ( remembered_shady_object ) , NEW_SYM ( remembered_wb_unprotected_objects ) ) ;
rb_hash_aset ( table , OLD_SYM ( remembered_shady_object_limit ) , NEW_SYM ( remembered_wb_unprotected_objects_limit ) ) ;
rb_hash_aset ( table , OLD_SYM ( old_object ) , NEW_SYM ( old_objects ) ) ;
rb_hash_aset ( table , OLD_SYM ( old_object_limit ) , NEW_SYM ( old_objects_limit ) ) ;
# endif
rb_hash_aset ( table , OLD_SYM ( total_allocated_object ) , NEW_SYM ( total_allocated_objects ) ) ;
rb_hash_aset ( table , OLD_SYM ( total_freed_object ) , NEW_SYM ( total_freed_objects ) ) ;
rb_hash_aset ( table , OLD_SYM ( malloc_increase ) , NEW_SYM ( malloc_increase_bytes ) ) ;
rb_hash_aset ( table , OLD_SYM ( malloc_limit ) , NEW_SYM ( malloc_increase_bytes_limit ) ) ;
# if RGENGC_ESTIMATE_OLDMALLOC
rb_hash_aset ( table , OLD_SYM ( oldmalloc_increase ) , NEW_SYM ( oldmalloc_increase_bytes ) ) ;
rb_hash_aset ( table , OLD_SYM ( oldmalloc_limit ) , NEW_SYM ( oldmalloc_increase_bytes_limit ) ) ;
# endif
# undef OLD_SYM
# undef NEW_SYM
rb_obj_freeze ( table ) ;
}
}
}
static VALUE
2014-11-14 10:35:05 +03:00
compat_key ( VALUE key )
2014-11-14 10:29:33 +03:00
{
2014-11-14 10:59:55 +03:00
VALUE new_key = rb_hash_lookup ( gc_stat_compat_table , key ) ;
2014-11-14 10:29:33 +03:00
2014-11-14 10:35:05 +03:00
if ( ! NIL_P ( new_key ) ) {
2014-11-14 10:29:33 +03:00
static int warned = 0 ;
if ( warned = = 0 ) {
rb_warn ( " GC.stat keys were changed from Ruby 2.1. "
" In this case, you refer to obsolete `% " PRIsVALUE " ' (new key is `% " PRIsVALUE " '). "
" Please check <https://bugs.ruby-lang.org/issues/9924> for more information. " ,
key , new_key ) ;
warned = 1 ;
}
2014-11-14 10:35:05 +03:00
}
return new_key ;
}
/*
 * Default proc installed on GC.stat's result hash: when a missing key is
 * looked up, translate it through compat_key and fetch the value stored
 * under the current name.  argv[1] is the missing key; returns Qnil when
 * no compatibility mapping exists.
 */
static VALUE
default_proc_for_compat_func(VALUE hash, VALUE dmy, int argc, VALUE *argv)
{
    VALUE key, new_key;

    Check_Type(hash, T_HASH);
    rb_check_arity(argc, 2, 2);
    key = argv[1];

    if ((new_key = compat_key(key)) != Qnil) {
        return rb_hash_lookup(hash, new_key);
    }
    return Qnil;
}
2015-12-08 18:07:41 +03:00
static size_t
2014-11-14 10:29:33 +03:00
gc_stat_internal ( VALUE hash_or_sym )
{
rb_objspace_t * objspace = & rb_objspace ;
VALUE hash = Qnil , key = Qnil ;
setup_gc_stat_symbols ( ) ;
if ( RB_TYPE_P ( hash_or_sym , T_HASH ) ) {
hash = hash_or_sym ;
if ( NIL_P ( RHASH_IFNONE ( hash ) ) ) {
static VALUE default_proc_for_compat = 0 ;
if ( default_proc_for_compat = = 0 ) { /* TODO: it should be */
default_proc_for_compat = rb_proc_new ( default_proc_for_compat_func , Qnil ) ;
rb_gc_register_mark_object ( default_proc_for_compat ) ;
}
rb_hash_set_default_proc ( hash , default_proc_for_compat ) ;
}
}
else if ( SYMBOL_P ( hash_or_sym ) ) {
key = hash_or_sym ;
}
else {
rb_raise ( rb_eTypeError , " non-hash or symbol argument " ) ;
2012-11-29 09:29:22 +04:00
}
2008-06-05 17:52:02 +04:00
2013-12-05 11:45:13 +04:00
# define SET(name, attr) \
2014-11-14 10:29:33 +03:00
if ( key = = gc_stat_symbols [ gc_stat_sym_ # # name ] ) \
2014-06-30 11:46:57 +04:00
return attr ; \
2013-12-05 11:45:13 +04:00
else if ( hash ! = Qnil ) \
2014-11-14 10:29:33 +03:00
rb_hash_aset ( hash , gc_stat_symbols [ gc_stat_sym_ # # name ] , SIZET2NUM ( attr ) ) ;
2009-06-17 01:07:26 +04:00
2014-11-14 10:35:05 +03:00
again :
2013-11-24 23:08:33 +04:00
SET ( count , objspace - > profile . count ) ;
2013-10-22 14:28:31 +04:00
2013-11-24 23:08:33 +04:00
/* implementation dependent counters */
2014-09-10 06:47:05 +04:00
SET ( heap_allocated_pages , heap_allocated_pages ) ;
2014-09-09 13:33:52 +04:00
SET ( heap_sorted_length , heap_pages_sorted_length ) ;
SET ( heap_allocatable_pages , heap_allocatable_pages ) ;
2014-09-10 05:42:09 +04:00
SET ( heap_available_slots , objspace_available_slots ( objspace ) ) ;
SET ( heap_live_slots , objspace_live_slots ( objspace ) ) ;
SET ( heap_free_slots , objspace_free_slots ( objspace ) ) ;
2014-09-09 13:53:47 +04:00
SET ( heap_final_slots , heap_pages_final_slots ) ;
2014-09-09 14:55:18 +04:00
SET ( heap_marked_slots , objspace - > marked_slots ) ;
2016-01-08 13:34:14 +03:00
SET ( heap_eden_pages , heap_eden - > total_pages ) ;
SET ( heap_tomb_pages , heap_tomb - > total_pages ) ;
2014-09-10 06:13:41 +04:00
SET ( total_allocated_pages , objspace - > profile . total_allocated_pages ) ;
SET ( total_freed_pages , objspace - > profile . total_freed_pages ) ;
2014-09-09 14:01:18 +04:00
SET ( total_allocated_objects , objspace - > total_allocated_objects ) ;
SET ( total_freed_objects , objspace - > profile . total_freed_objects ) ;
2018-05-18 11:40:16 +03:00
SET ( malloc_increase_bytes , malloc_increase ) ;
2014-09-10 06:53:11 +04:00
SET ( malloc_increase_bytes_limit , malloc_limit ) ;
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if USE_RGENGC
2013-11-24 23:08:33 +04:00
SET ( minor_gc_count , objspace - > profile . minor_gc_count ) ;
SET ( major_gc_count , objspace - > profile . major_gc_count ) ;
2015-03-18 21:02:13 +03:00
SET ( remembered_wb_unprotected_objects , objspace - > rgengc . uncollectible_wb_unprotected_objects ) ;
SET ( remembered_wb_unprotected_objects_limit , objspace - > rgengc . uncollectible_wb_unprotected_objects_limit ) ;
2014-09-10 06:35:17 +04:00
SET ( old_objects , objspace - > rgengc . old_objects ) ;
SET ( old_objects_limit , objspace - > rgengc . old_objects_limit ) ;
2013-11-24 23:49:02 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
2014-09-10 06:53:11 +04:00
SET ( oldmalloc_increase_bytes , objspace - > rgengc . oldmalloc_increase ) ;
SET ( oldmalloc_increase_bytes_limit , objspace - > rgengc . oldmalloc_increase_limit ) ;
2013-11-24 23:08:33 +04:00
# endif
* gc.c: support RGENGC. [ruby-trunk - Feature #8339]
See this ticet about RGENGC.
* gc.c: Add several flags:
* RGENGC_DEBUG: if >0, then prints debug information.
* RGENGC_CHECK_MODE: if >0, add assertions.
* RGENGC_PROFILE: if >0, add profiling features.
check GC.stat and GC::Profiler.
* include/ruby/ruby.h: disable RGENGC by default (USE_RGENGC == 0).
* array.c: add write barriers for T_ARRAY and generate sunny objects.
* include/ruby/ruby.h (RARRAY_PTR_USE): added. Use this macro if
you want to access raw pointers. If you modify the contents which
pointer pointed, then you need to care write barrier.
* bignum.c, marshal.c, random.c: generate T_BIGNUM sunny objects.
* complex.c, include/ruby/ruby.h: add write barriers for T_COMPLEX
and generate sunny objects.
* rational.c (nurat_s_new_internal), include/ruby/ruby.h: add write
barriers for T_RATIONAL and generate sunny objects.
* internal.h: add write barriers for RBasic::klass.
* numeric.c (rb_float_new_in_heap): generate sunny T_FLOAT objects.
* object.c (rb_class_allocate_instance), range.c:
generate sunny T_OBJECT objects.
* string.c: add write barriers for T_STRING and generate sunny objects.
* variable.c: add write barriers for ivars.
* vm_insnhelper.c (vm_setivar): ditto.
* include/ruby/ruby.h, debug.c: use two flags
FL_WB_PROTECTED and FL_OLDGEN.
* node.h (NODE_FL_CREF_PUSHED_BY_EVAL, NODE_FL_CREF_OMOD_SHARED):
move flag bits.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@40703 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2013-05-13 22:07:47 +04:00
# if RGENGC_PROFILE
2014-09-10 02:32:09 +04:00
SET ( total_generated_normal_object_count , objspace - > profile . total_generated_normal_object_count ) ;
SET ( total_generated_shady_object_count , objspace - > profile . total_generated_shady_object_count ) ;
SET ( total_shade_operation_count , objspace - > profile . total_shade_operation_count ) ;
SET ( total_promoted_count , objspace - > profile . total_promoted_count ) ;
SET ( total_remembered_normal_object_count , objspace - > profile . total_remembered_normal_object_count ) ;
SET ( total_remembered_shady_object_count , objspace - > profile . total_remembered_shady_object_count ) ;
2013-12-05 11:45:13 +04:00
# endif /* RGENGC_PROFILE */
# endif /* USE_RGENGC */
# undef SET
2014-03-31 01:55:59 +04:00
if ( ! NIL_P ( key ) ) { /* matched key should return above */
2014-11-14 10:35:05 +03:00
VALUE new_key ;
if ( ( new_key = compat_key ( key ) ) ! = Qnil ) {
key = new_key ;
goto again ;
}
2014-03-31 01:55:59 +04:00
rb_raise ( rb_eArgError , " unknown key: % " PRIsVALUE , rb_sym2str ( key ) ) ;
}
2013-12-05 11:45:13 +04:00
# if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
if ( hash ! = Qnil ) {
2013-06-18 06:27:37 +04:00
gc_count_add_each_types ( hash , " generated_normal_object_count_types " , objspace - > profile . generated_normal_object_count_types ) ;
2013-05-26 20:43:21 +04:00
gc_count_add_each_types ( hash , " generated_shady_object_count_types " , objspace - > profile . generated_shady_object_count_types ) ;
gc_count_add_each_types ( hash , " shade_operation_count_types " , objspace - > profile . shade_operation_count_types ) ;
2014-09-10 02:32:09 +04:00
gc_count_add_each_types ( hash , " promoted_types " , objspace - > profile . promoted_types ) ;
2013-06-07 05:17:19 +04:00
gc_count_add_each_types ( hash , " remembered_normal_object_count_types " , objspace - > profile . remembered_normal_object_count_types ) ;
2013-05-26 20:43:21 +04:00
gc_count_add_each_types ( hash , " remembered_shady_object_count_types " , objspace - > profile . remembered_shady_object_count_types ) ;
2013-05-15 12:07:30 +04:00
}
# endif
2013-12-05 11:45:13 +04:00
2014-06-30 11:46:57 +04:00
return 0 ;
1998-01-16 15:13:05 +03:00
}
2013-12-05 11:45:13 +04:00
/*
 *  call-seq:
 *     GC.stat -> Hash
 *     GC.stat(hash) -> hash
 *     GC.stat(:key) -> Numeric
 *
 *  Returns a Hash containing information about the GC.
 *
 *  The hash includes information about internal statistics about GC such as:
 *
 *      {
 *          :count=>0,
 *          :heap_allocated_pages=>24,
 *          :heap_sorted_length=>24,
 *          :heap_allocatable_pages=>0,
 *          :heap_available_slots=>9783,
 *          :heap_live_slots=>7713,
 *          :heap_free_slots=>2070,
 *          :heap_final_slots=>0,
 *          :heap_marked_slots=>0,
 *          :heap_eden_pages=>24,
 *          :heap_tomb_pages=>0,
 *          :total_allocated_pages=>24,
 *          :total_freed_pages=>0,
 *          :total_allocated_objects=>7796,
 *          :total_freed_objects=>83,
 *          :malloc_increase_bytes=>2389312,
 *          :malloc_increase_bytes_limit=>16777216,
 *          :minor_gc_count=>0,
 *          :major_gc_count=>0,
 *          :remembered_wb_unprotected_objects=>0,
 *          :remembered_wb_unprotected_objects_limit=>0,
 *          :old_objects=>0,
 *          :old_objects_limit=>0,
 *          :oldmalloc_increase_bytes=>2389760,
 *          :oldmalloc_increase_bytes_limit=>16777216
 *      }
 *
 *  The contents of the hash are implementation specific and may be changed in
 *  the future.
 *
 *  This method is only expected to work on C Ruby.
 *
 */

static VALUE
gc_stat(int argc, VALUE *argv, VALUE self)
{
    VALUE arg = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        arg = argv[0];
        if (SYMBOL_P(arg)) {
            /* single-key lookup: return just that statistic */
            size_t value = gc_stat_internal(arg);
            return SIZET2NUM(value);
        }
        else if (!RB_TYPE_P(arg, T_HASH)) {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }
    }
    else {
        arg = rb_hash_new();
    }
    gc_stat_internal(arg);
    return arg;
}
/* C-level entry point mirroring GC.stat: a Symbol key returns that single
 * statistic; any other value is treated as the hash argument and 0 is
 * returned (gc_stat_internal fills the hash or raises). */
size_t
rb_gc_stat(VALUE key)
{
    if (SYMBOL_P(key)) {
        size_t value = gc_stat_internal(key);
        return value;
    }
    else {
        gc_stat_internal(key);
        return 0;
    }
}
2003-12-22 09:20:14 +03:00
/*
* call - seq :
2016-09-08 07:57:49 +03:00
* GC . stress - > integer , true or false
2005-06-19 21:16:14 +04:00
*
2012-11-29 12:15:53 +04:00
* Returns current status of GC stress mode .
2003-12-22 09:20:14 +03:00
*/
1998-01-16 15:13:05 +03:00
static VALUE
2012-08-05 14:39:37 +04:00
gc_stress_get ( VALUE self )
1998-01-16 15:13:05 +03:00
{
2012-08-05 14:39:37 +04:00
rb_objspace_t * objspace = & rb_objspace ;
2014-09-09 08:56:55 +04:00
return ruby_gc_stress_mode ;
}
/* Update both the boolean stressful flag and the raw stress-mode value
 * (the value may carry flag bits; see gc_stress_set_m's rdoc). */
static void
gc_stress_set(rb_objspace_t *objspace, VALUE flag)
{
    objspace->flags.gc_stressful = RTEST(flag);
    objspace->gc_stress_mode = flag;
}
2003-12-22 09:20:14 +03:00
/*
* call - seq :
2014-04-21 04:39:43 +04:00
* GC . stress = flag - > flag
2005-06-19 21:16:14 +04:00
*
2012-08-05 14:39:37 +04:00
* Updates the GC stress mode .
2005-06-19 21:16:14 +04:00
*
2012-11-29 12:15:53 +04:00
* When stress mode is enabled , the GC is invoked at every GC opportunity :
2012-08-05 14:39:37 +04:00
* all memory and object allocations .
*
2012-11-29 12:15:53 +04:00
* Enabling stress mode will degrade performance , it is only for debugging .
2014-04-21 04:39:43 +04:00
*
2016-09-24 05:28:25 +03:00
* flag can be true , false , or an integer bit - ORed following flags .
2014-04-21 04:39:43 +04:00
* 0x01 : : no major GC
* 0x02 : : no immediate sweep
2014-04-22 01:54:17 +04:00
* 0x04 : : full mark after malloc / calloc / realloc
2003-12-22 09:20:14 +03:00
*/
2000-07-15 17:37:03 +04:00
static VALUE
2014-09-09 08:56:55 +04:00
gc_stress_set_m ( VALUE self , VALUE flag )
2012-03-13 07:37:06 +04:00
{
2012-08-05 14:39:37 +04:00
rb_objspace_t * objspace = & rb_objspace ;
2014-09-09 08:56:55 +04:00
gc_stress_set ( objspace , flag ) ;
2012-08-05 14:39:37 +04:00
return flag ;
2012-03-13 07:37:06 +04:00
}
2012-08-05 14:39:37 +04:00
/*
* call - seq :
* GC . enable - > true or false
*
2012-11-29 12:15:53 +04:00
* Enables garbage collection , returning + true + if garbage
2012-08-05 14:39:37 +04:00
* collection was previously disabled .
*
* GC . disable # = > false
* GC . enable # = > true
* GC . enable # = > false
*
*/
2012-03-13 07:37:06 +04:00
VALUE
2012-08-05 14:39:37 +04:00
rb_gc_enable ( void )
2000-07-15 17:37:03 +04:00
{
2008-04-27 07:20:35 +04:00
rb_objspace_t * objspace = & rb_objspace ;
2012-08-05 14:39:37 +04:00
int old = dont_gc ;
dont_gc = FALSE ;
return old ? Qtrue : Qfalse ;
2000-07-15 17:37:03 +04:00
}
2003-12-22 09:20:14 +03:00
/*
* call - seq :
2012-08-05 14:39:37 +04:00
* GC . disable - > true or false
2005-06-19 21:16:14 +04:00
*
2012-11-29 12:15:53 +04:00
* Disables garbage collection , returning + true + if garbage
2012-08-05 14:39:37 +04:00
* collection was already disabled .
*
* GC . disable # = > false
* GC . disable # = > true
2005-06-19 21:16:14 +04:00
*
2003-12-22 09:20:14 +03:00
*/
2012-08-05 14:39:37 +04:00
VALUE
rb_gc_disable ( void )
2000-07-15 17:37:03 +04:00
{
2012-08-05 14:39:37 +04:00
rb_objspace_t * objspace = & rb_objspace ;
int old = dont_gc ;
2000-07-15 17:37:03 +04:00
2014-09-08 08:11:00 +04:00
gc_rest ( objspace ) ;
2013-09-30 06:20:28 +04:00
2012-08-05 14:39:37 +04:00
dont_gc = TRUE ;
return old ? Qtrue : Qfalse ;
2012-03-13 07:37:06 +04:00
}
2013-09-27 13:36:48 +04:00
static int
2014-02-07 05:54:26 +04:00
get_envparam_size ( const char * name , size_t * default_value , size_t lower_bound )
2012-03-13 07:37:06 +04:00
{
2013-09-27 13:36:48 +04:00
char * ptr = getenv ( name ) ;
2014-02-07 05:54:26 +04:00
ssize_t val ;
2000-07-15 17:37:03 +04:00
2014-02-07 06:02:58 +04:00
if ( ptr ! = NULL & & * ptr ) {
2014-02-14 09:04:07 +04:00
size_t unit = 0 ;
2014-02-06 15:49:14 +04:00
char * end ;
2014-02-07 05:54:26 +04:00
# if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
val = strtoll ( ptr , & end , 0 ) ;
# else
val = strtol ( ptr , & end , 0 ) ;
# endif
2014-02-14 09:04:07 +04:00
switch ( * end ) {
case ' k ' : case ' K ' :
unit = 1024 ;
+ + end ;
break ;
case ' m ' : case ' M ' :
unit = 1024 * 1024 ;
+ + end ;
break ;
case ' g ' : case ' G ' :
unit = 1024 * 1024 * 1024 ;
+ + end ;
break ;
}
2014-04-17 12:24:10 +04:00
while ( * end & & isspace ( ( unsigned char ) * end ) ) end + + ;
2014-02-14 09:04:07 +04:00
if ( * end ) {
2014-02-06 15:49:14 +04:00
if ( RTEST ( ruby_verbose ) ) fprintf ( stderr , " invalid string for %s: %s \n " , name , ptr ) ;
return 0 ;
}
2014-02-14 09:04:07 +04:00
if ( unit > 0 ) {
if ( val < - ( ssize_t ) ( SIZE_MAX / 2 / unit ) | | ( ssize_t ) ( SIZE_MAX / 2 / unit ) < val ) {
if ( RTEST ( ruby_verbose ) ) fprintf ( stderr , " %s=%s is ignored because it overflows \n " , name , ptr ) ;
return 0 ;
}
val * = unit ;
}
2014-02-07 05:54:26 +04:00
if ( val > 0 & & ( size_t ) val > lower_bound ) {
if ( RTEST ( ruby_verbose ) ) {
2016-09-13 15:33:13 +03:00
fprintf ( stderr , " %s=% " PRIdSIZE " (default value: % " PRIuSIZE " ) \n " , name , val , * default_value ) ;
2014-02-07 05:54:26 +04:00
}
* default_value = ( size_t ) val ;
2014-02-07 05:59:16 +04:00
return 1 ;
2013-09-27 13:36:48 +04:00
}
else {
2014-02-07 05:54:26 +04:00
if ( RTEST ( ruby_verbose ) ) {
2016-09-13 15:33:13 +03:00
fprintf ( stderr , " %s=% " PRIdSIZE " (default value: % " PRIuSIZE " ) is ignored because it must be greater than % " PRIuSIZE " . \n " ,
2014-02-07 05:54:26 +04:00
name , val , * default_value , lower_bound ) ;
}
return 0 ;
2012-08-05 14:39:37 +04:00
}
2000-07-15 17:37:03 +04:00
}
2013-09-27 13:36:48 +04:00
return 0 ;
}
2012-08-05 14:39:37 +04:00
2013-09-27 13:36:48 +04:00
static int
2016-04-04 11:41:55 +03:00
get_envparam_double ( const char * name , double * default_value , double lower_bound , double upper_bound , int accept_zero )
2013-09-27 13:36:48 +04:00
{
char * ptr = getenv ( name ) ;
double val ;
2013-07-17 12:25:11 +04:00
2014-02-07 06:02:58 +04:00
if ( ptr ! = NULL & & * ptr ) {
2014-02-06 15:49:14 +04:00
char * end ;
val = strtod ( ptr , & end ) ;
if ( ! * ptr | | * end ) {
if ( RTEST ( ruby_verbose ) ) fprintf ( stderr , " invalid string for %s: %s \n " , name , ptr ) ;
return 0 ;
}
2016-04-04 11:41:55 +03:00
if ( accept_zero & & val = = 0.0 ) {
goto accept ;
}
else if ( val < = lower_bound ) {
if ( RTEST ( ruby_verbose ) ) {
fprintf ( stderr , " %s=%f (default value: %f) is ignored because it must be greater than %f. \n " ,
name , val , * default_value , lower_bound ) ;
}
}
else if ( upper_bound ! = 0.0 & & /* ignore upper_bound if it is 0.0 */
val > upper_bound ) {
if ( RTEST ( ruby_verbose ) ) {
fprintf ( stderr , " %s=%f (default value: %f) is ignored because it must be lower than %f. \n " ,
name , val , * default_value , upper_bound ) ;
}
}
else {
accept :
2014-02-17 07:33:03 +04:00
if ( RTEST ( ruby_verbose ) ) fprintf ( stderr , " %s=%f (default value: %f) \n " , name , val , * default_value ) ;
2013-09-27 13:36:48 +04:00
* default_value = val ;
return 1 ;
2012-08-05 14:39:37 +04:00
}
2013-03-13 18:52:00 +04:00
}
2013-09-27 13:36:48 +04:00
return 0 ;
}
2013-03-13 18:52:00 +04:00
2013-11-23 07:33:10 +04:00
static void
gc_set_initial_pages ( void )
{
size_t min_pages ;
rb_objspace_t * objspace = & rb_objspace ;
2016-01-09 01:15:40 +03:00
min_pages = gc_params . heap_init_slots / HEAP_PAGE_OBJ_LIMIT ;
2016-01-08 13:34:14 +03:00
if ( min_pages > heap_eden - > total_pages ) {
heap_add_pages ( objspace , heap_eden , min_pages - heap_eden - > total_pages ) ;
2013-11-23 07:33:10 +04:00
}
}
/*
* GC tuning environment variables
*
* * RUBY_GC_HEAP_INIT_SLOTS
* - Initial allocation slots .
* * RUBY_GC_HEAP_FREE_SLOTS
2014-01-19 09:43:28 +04:00
* - Prepare at least this amount of slots after GC .
2013-11-23 07:33:10 +04:00
* - Allocate slots if there are not enough slots .
* * RUBY_GC_HEAP_GROWTH_FACTOR ( new from 2.1 )
* - Allocate slots by this factor .
* - ( next slots number ) = ( current slots number ) * ( this factor )
* * RUBY_GC_HEAP_GROWTH_MAX_SLOTS ( new from 2.1 )
2014-12-16 01:39:33 +03:00
* - Allocation rate is limited to this number of slots .
2016-03-31 10:45:13 +03:00
* * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO ( new from 2.4 )
* - Allocate additional pages when the number of free slots is
* lower than the value ( total_slots * ( this ratio ) ) .
2016-03-31 12:16:48 +03:00
* * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO ( new from 2.4 )
* - Allocate slots to satisfy this formula :
* free_slots = total_slots * goal_ratio
* - In other words , prepare ( total_slots * goal_ratio ) free slots .
* - if this value is 0.0 , then use RUBY_GC_HEAP_GROWTH_FACTOR directly .
2016-03-31 10:45:13 +03:00
* * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO ( new from 2.4 )
* - Allow to free pages when the number of free slots is
* greater than the value ( total_slots * ( this ratio ) ) .
2014-02-17 07:27:13 +04:00
* * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR ( new from 2.1 .1 )
* - Do full GC when the number of old objects is more than R * N
* where R is this factor and
* N is the number of old objects just after last full GC .
2013-11-23 07:33:10 +04:00
*
* * obsolete
* * RUBY_FREE_MIN - > RUBY_GC_HEAP_FREE_SLOTS ( from 2.1 )
* * RUBY_HEAP_MIN_SLOTS - > RUBY_GC_HEAP_INIT_SLOTS ( from 2.1 )
*
* * RUBY_GC_MALLOC_LIMIT
* * RUBY_GC_MALLOC_LIMIT_MAX ( new from 2.1 )
* * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR ( new from 2.1 )
*
2013-11-24 22:13:48 +04:00
* * RUBY_GC_OLDMALLOC_LIMIT ( new from 2.1 )
* * RUBY_GC_OLDMALLOC_LIMIT_MAX ( new from 2.1 )
* * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR ( new from 2.1 )
2013-11-23 07:33:10 +04:00
*/
2013-09-27 13:36:48 +04:00
void
2013-12-05 04:19:13 +04:00
ruby_gc_set_params ( int safe_level )
2013-09-27 13:36:48 +04:00
{
2013-12-05 04:19:13 +04:00
if ( safe_level > 0 ) return ;
2013-09-27 13:36:48 +04:00
2013-11-23 07:33:10 +04:00
/* RUBY_GC_HEAP_FREE_SLOTS */
2014-02-07 05:54:26 +04:00
if ( get_envparam_size ( " RUBY_GC_HEAP_FREE_SLOTS " , & gc_params . heap_free_slots , 0 ) ) {
2013-12-21 23:11:12 +04:00
/* ok */
}
2014-02-07 05:54:26 +04:00
else if ( get_envparam_size ( " RUBY_FREE_MIN " , & gc_params . heap_free_slots , 0 ) ) {
2013-11-23 07:33:10 +04:00
rb_warn ( " RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead. " ) ;
}
2013-09-27 13:36:48 +04:00
2013-11-23 07:33:10 +04:00
/* RUBY_GC_HEAP_INIT_SLOTS */
2014-02-07 05:54:26 +04:00
if ( get_envparam_size ( " RUBY_GC_HEAP_INIT_SLOTS " , & gc_params . heap_init_slots , 0 ) ) {
2013-11-23 07:33:10 +04:00
gc_set_initial_pages ( ) ;
2012-03-13 07:37:06 +04:00
}
2014-02-07 05:54:26 +04:00
else if ( get_envparam_size ( " RUBY_HEAP_MIN_SLOTS " , & gc_params . heap_init_slots , 0 ) ) {
2013-12-21 23:11:12 +04:00
rb_warn ( " RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead. " ) ;
2013-11-23 07:33:10 +04:00
gc_set_initial_pages ( ) ;
}
2016-04-04 11:41:55 +03:00
get_envparam_double ( " RUBY_GC_HEAP_GROWTH_FACTOR " , & gc_params . growth_factor , 1.0 , 0.0 , FALSE ) ;
2014-02-07 05:54:26 +04:00
get_envparam_size ( " RUBY_GC_HEAP_GROWTH_MAX_SLOTS " , & gc_params . growth_max_slots , 0 ) ;
2016-04-04 11:41:55 +03:00
get_envparam_double ( " RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO " , & gc_params . heap_free_slots_min_ratio ,
0.0 , 1.0 , FALSE ) ;
get_envparam_double ( " RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO " , & gc_params . heap_free_slots_max_ratio ,
gc_params . heap_free_slots_min_ratio , 1.0 , FALSE ) ;
get_envparam_double ( " RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO " , & gc_params . heap_free_slots_goal_ratio ,
gc_params . heap_free_slots_min_ratio , gc_params . heap_free_slots_max_ratio , TRUE ) ;
get_envparam_double ( " RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR " , & gc_params . oldobject_limit_factor , 0.0 , 0.0 , TRUE ) ;
2013-09-27 13:36:48 +04:00
2014-02-07 05:54:26 +04:00
get_envparam_size ( " RUBY_GC_MALLOC_LIMIT " , & gc_params . malloc_limit_min , 0 ) ;
get_envparam_size ( " RUBY_GC_MALLOC_LIMIT_MAX " , & gc_params . malloc_limit_max , 0 ) ;
2018-06-05 03:37:05 +03:00
if ( ! gc_params . malloc_limit_max ) { /* ignore max-check if 0 */
gc_params . malloc_limit_max = SIZE_MAX ;
}
2016-04-04 11:41:55 +03:00
get_envparam_double ( " RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR " , & gc_params . malloc_limit_growth_factor , 1.0 , 0.0 , FALSE ) ;
2013-11-05 08:51:01 +04:00
2014-03-01 14:08:10 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
2014-02-07 05:54:26 +04:00
if ( get_envparam_size ( " RUBY_GC_OLDMALLOC_LIMIT " , & gc_params . oldmalloc_limit_min , 0 ) ) {
2014-02-06 04:04:14 +04:00
rb_objspace_t * objspace = & rb_objspace ;
objspace - > rgengc . oldmalloc_increase_limit = gc_params . oldmalloc_limit_min ;
}
2014-02-07 05:54:26 +04:00
get_envparam_size ( " RUBY_GC_OLDMALLOC_LIMIT_MAX " , & gc_params . oldmalloc_limit_max , 0 ) ;
2016-04-04 11:41:55 +03:00
get_envparam_double ( " RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR " , & gc_params . oldmalloc_limit_growth_factor , 1.0 , 0.0 , FALSE ) ;
2013-11-24 22:13:48 +04:00
# endif
2012-03-13 07:37:06 +04:00
}
2012-10-24 04:04:56 +04:00
/* Invoke `func(child, data)` for every object directly reachable from `obj`,
 * by running the marking routine with a mark-func hook installed instead of
 * actually marking.  Does nothing for unmarkable objects. */
void
rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (is_markable_object(objspace, obj)) {
        struct mark_func_data_struct mfd;
        mfd.mark_func = func;
        mfd.data = data;
        PUSH_MARK_FUNC_DATA(&mfd);
        gc_mark_children(objspace, obj);
        POP_MARK_FUNC_DATA();
    }
}
2013-10-15 14:22:33 +04:00
/* Closure passed through the mark-func hook while enumerating GC roots:
 * carries the current root category string alongside the user callback. */
struct root_objects_data {
    const char *category;
    void (*func)(const char *category, VALUE, void *);
    void *data;
};

/* Trampoline matching the mark-func signature: forwards each root object to
 * the user callback together with the current category. */
static void
root_objects_from(VALUE obj, void *ptr)
{
    const struct root_objects_data *data = (struct root_objects_data *)ptr;
    (*data->func)(data->category, obj, data->data);
}
/* Invoke `func(category, root, passing_data)` for every GC root, by running
 * gc_mark_roots with the root_objects_from trampoline installed as the
 * mark-func hook.  gc_mark_roots updates data.category as it walks. */
void
rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct root_objects_data data;
    struct mark_func_data_struct mfd;

    data.func = func;
    data.data = passing_data;

    mfd.mark_func = root_objects_from;
    mfd.data = &data;

    PUSH_MARK_FUNC_DATA(&mfd);
    gc_mark_roots(objspace, &data.category);
    POP_MARK_FUNC_DATA();
}
2012-08-05 14:39:37 +04:00
/*
- - - - - - - - - - - - - - - - - - - - - - - - Extended allocator - - - - - - - - - - - - - - - - - - - - - - - -
*/
2002-12-04 10:39:32 +03:00
2013-12-05 12:52:30 +04:00
static void objspace_xfree ( rb_objspace_t * objspace , void * ptr , size_t size ) ;
2002-12-04 10:39:32 +03:00
2012-08-05 14:39:37 +04:00
static void *
negative_size_allocation_error_with_gvl ( void * ptr )
1999-08-13 09:45:20 +04:00
{
2012-08-05 14:39:37 +04:00
rb_raise ( rb_eNoMemError , " %s " , ( const char * ) ptr ) ;
return 0 ; /* should not be reached */
1999-08-13 09:45:20 +04:00
}
1998-01-16 15:13:05 +03:00
static void
2012-08-05 14:39:37 +04:00
negative_size_allocation_error ( const char * msg )
1998-01-16 15:13:05 +03:00
{
2012-08-05 14:39:37 +04:00
if ( ruby_thread_has_gvl_p ( ) ) {
rb_raise ( rb_eNoMemError , " %s " , msg ) ;
2010-02-16 15:34:09 +03:00
}
2010-11-04 14:46:26 +03:00
else {
2012-08-05 14:39:37 +04:00
if ( ruby_native_thread_p ( ) ) {
rb_thread_call_with_gvl ( negative_size_allocation_error_with_gvl , ( void * ) msg ) ;
}
else {
fprintf ( stderr , " [FATAL] %s \n " , msg ) ;
exit ( EXIT_FAILURE ) ;
}
2010-02-16 15:34:09 +03:00
}
}
2012-08-05 14:39:37 +04:00
/* rb_thread_call_with_gvl callback: raise the out-of-memory error under the
 * GVL.  Never returns normally. */
static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}
2008-04-27 07:20:35 +04:00
static void
2012-08-05 14:39:37 +04:00
ruby_memerror ( void )
2008-06-30 13:57:07 +04:00
{
2012-08-05 14:39:37 +04:00
if ( ruby_thread_has_gvl_p ( ) ) {
rb_memerror ( ) ;
}
else {
if ( ruby_native_thread_p ( ) ) {
rb_thread_call_with_gvl ( ruby_memerror_body , 0 ) ;
}
else {
/* no ruby thread */
fprintf ( stderr , " [FATAL] failed to allocate memory \n " ) ;
exit ( EXIT_FAILURE ) ;
2008-07-27 09:59:32 +04:00
}
2008-06-30 13:57:07 +04:00
}
2010-02-16 15:34:09 +03:00
}
2012-08-05 14:39:37 +04:00
void
rb_memerror ( void )
2010-02-16 15:34:09 +03:00
{
2017-11-07 08:22:09 +03:00
rb_execution_context_t * ec = GET_EC ( ) ;
rb_objspace_t * objspace = rb_objspace_of ( rb_ec_vm_ptr ( ec ) ) ;
2017-04-17 05:31:35 +03:00
VALUE exc ;
2014-09-08 08:11:00 +04:00
if ( during_gc ) gc_exit ( objspace , " rb_memerror " ) ;
2017-04-17 05:31:35 +03:00
exc = nomem_error ;
if ( ! exc | |
2017-11-07 08:22:09 +03:00
rb_ec_raised_p ( ec , RAISED_NOMEMORY ) ) {
2012-08-05 14:39:37 +04:00
fprintf ( stderr , " [FATAL] failed to allocate memory \n " ) ;
exit ( EXIT_FAILURE ) ;
}
2017-11-07 08:22:09 +03:00
if ( rb_ec_raised_p ( ec , RAISED_NOMEMORY ) ) {
rb_ec_raised_clear ( ec ) ;
2012-08-05 14:39:37 +04:00
}
2017-04-17 05:31:35 +03:00
else {
2017-11-07 08:22:09 +03:00
rb_ec_raised_set ( ec , RAISED_NOMEMORY ) ;
2017-04-17 05:31:35 +03:00
exc = ruby_vm_special_exception_copy ( exc ) ;
}
2017-11-07 08:22:09 +03:00
ec - > errinfo = exc ;
EC_JUMP_TAG ( ec , TAG_RAISE ) ;
2008-06-30 13:57:07 +04:00
}
2018-10-31 00:53:56 +03:00
void *
rb_aligned_malloc ( size_t alignment , size_t size )
1998-01-16 15:19:22 +03:00
{
2012-08-05 14:39:37 +04:00
void * res ;
# if defined __MINGW32__
res = __mingw_aligned_malloc ( size , alignment ) ;
2015-10-18 05:08:56 +03:00
# elif defined _WIN32
2013-03-31 01:08:30 +04:00
void * _aligned_malloc ( size_t , size_t ) ;
2012-08-05 14:39:37 +04:00
res = _aligned_malloc ( size , alignment ) ;
# elif defined(HAVE_POSIX_MEMALIGN)
if ( posix_memalign ( & res , alignment , size ) = = 0 ) {
return res ;
}
else {
return NULL ;
}
# elif defined(HAVE_MEMALIGN)
res = memalign ( alignment , size ) ;
# else
char * aligned ;
res = malloc ( alignment + size + sizeof ( void * ) ) ;
aligned = ( char * ) res + alignment + sizeof ( void * ) ;
aligned - = ( ( VALUE ) aligned & ( alignment - 1 ) ) ;
( ( void * * ) aligned ) [ - 1 ] = res ;
res = ( void * ) aligned ;
# endif
/* alignment must be a power of 2 */
2017-06-22 08:03:18 +03:00
GC_ASSERT ( ( ( alignment - 1 ) & alignment ) = = 0 ) ;
GC_ASSERT ( alignment % sizeof ( void * ) = = 0 ) ;
2012-08-05 14:39:37 +04:00
return res ;
2009-09-18 11:29:17 +04:00
}
2018-10-31 00:53:56 +03:00
/* Free memory obtained from rb_aligned_malloc, dispatching to the matching
 * platform deallocator.  The generic fallback retrieves the raw malloc
 * pointer stored immediately before the aligned block. */
void
rb_aligned_free(void *ptr)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
#else
    free(((void **)ptr)[-1]);
#endif
}
2013-12-05 08:54:20 +04:00
static inline size_t
2013-12-05 12:52:30 +04:00
objspace_malloc_size ( rb_objspace_t * objspace , void * ptr , size_t hint )
2013-12-05 08:54:20 +04:00
{
# ifdef HAVE_MALLOC_USABLE_SIZE
return malloc_usable_size ( ptr ) ;
# else
return hint ;
# endif
}
/* Kind of memory operation being accounted by objspace_malloc_increase. */
enum memop_type {
    MEMOP_TYPE_MALLOC  = 0,
    MEMOP_TYPE_FREE,
    MEMOP_TYPE_REALLOC
};
2013-12-06 12:53:47 +04:00
/* Atomically subtract `sub` from *var, clamping at zero instead of wrapping:
 * the CAS loop re-reads *var and shrinks `sub` to the current value when it
 * would underflow. */
static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val; /* clamp so the result never underflows */
        if (ATOMIC_SIZE_CAS(*var, val, val - sub) == val) break;
    }
}
2014-06-10 20:55:32 +04:00
/* When GC stress mode is active and we are on a Ruby-registered native
 * thread, force a GC at this malloc site.  Whether it is a full mark depends
 * on the 0x04 stress flag (see gc_stress_set_m). */
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
    if (ruby_gc_stressful && ruby_native_thread_p()) {
        int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                     GPR_FLAG_STRESS | GPR_FLAG_MALLOC;

        if (gc_stress_full_mark_after_malloc_p()) {
            reason |= GPR_FLAG_FULL_MARK;
        }
        garbage_collect_with_gvl(objspace, reason);
    }
}
2013-09-27 12:01:14 +04:00
/* Book-keeping hook called on every malloc/realloc/free that goes through
 * the objspace allocator.
 *
 * 1. Adjusts the global malloc_increase counter (and the oldmalloc counter
 *    when RGENGC_ESTIMATE_OLDMALLOC) by the net size delta.
 * 2. For MALLOC operations, triggers a GC when malloc_increase exceeds
 *    malloc_limit; finishes any lazy sweep first since that can lower
 *    malloc_increase (hence the retry loop).
 * 3. Under MALLOC_ALLOCATED_SIZE, maintains the exact allocated_size and
 *    allocations counters exposed via GC.malloc_allocated_size.
 */
static void
objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (new_size > old_size) {
        ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
#endif
    }
    else {
        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
#endif
    }

    if (type == MEMOP_TYPE_MALLOC) {
      retry:
        if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
            if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
                gc_rest(objspace); /* gc_rest can reduce malloc_increase */
                goto retry;
            }
            garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
        }
    }

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {
        ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
    }
    else {
        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
#endif
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
    }

    /* debugging trace; flip the `if (0)` to enable */
    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
                   mem,
                   type == MEMOP_TYPE_MALLOC  ? "malloc" :
                   type == MEMOP_TYPE_FREE    ? "free  " :
                   type == MEMOP_TYPE_REALLOC ? "realloc" : "error",
                   (int)new_size, (int)old_size);

    switch (type) {
      case MEMOP_TYPE_MALLOC:
        ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
        break;
      case MEMOP_TYPE_FREE:
        {
            size_t allocations = objspace->malloc_params.allocations;
            if (allocations > 0) {
                atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
            }
#if MALLOC_ALLOCATED_SIZE_CHECK
            else {
                GC_ASSERT(objspace->malloc_params.allocations > 0);
            }
#endif
        }
        break;
      case MEMOP_TYPE_REALLOC: /* ignore */ break;
    }
#endif
}
2018-06-20 10:53:29 +03:00
/* Header prepended to every allocation when CALC_EXACT_MALLOC_SIZE is on.
 * Sized as 4 words so user data stays reasonably aligned. */
struct malloc_obj_info { /* 4 words */
    size_t size;             /* requested size including this header */
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    size_t gen;              /* GC count at allocation time */
    const char *file;        /* allocation site, from ruby_malloc_info_file */
    size_t line;
#endif
};

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
/* Current allocation site, set by allocation macros elsewhere in the tree. */
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
#endif

/* Normalize a requested allocation size: never pass 0 to malloc, and make
 * room for the malloc_obj_info header when exact-size accounting is on. */
static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif

    return size;
}
2012-08-05 14:39:37 +04:00
/* Post-allocation hook: record the (usable) size in the accounting
 * counters and, under CALC_EXACT_MALLOC_SIZE, fill in the info header and
 * return the pointer advanced past it so callers see only their data. */
static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = objspace->profile.count;
        info->file = ruby_malloc_info_file;
        info->line = info->file ? ruby_malloc_info_line : 0;
#else
        info->file = NULL;
#endif
        mem = info + 1;   /* hide the header from the caller */
    }
#endif

    return mem;
}
2012-08-05 14:39:37 +04:00
/* Evaluate the allocation expression ALLOC; if it fails (yields 0), run a
 * full immediate GC and retry once.  If it still fails, raise NoMemoryError
 * via ruby_memerror().  Also applies GC stress before the first attempt.
 * NOTE: ALLOC is intentionally evaluated up to twice. */
#define TRY_WITH_GC(alloc) do { \
        objspace_malloc_gc_stress(objspace); \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                GPR_FLAG_MALLOC) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
2017-05-12 10:48:08 +03:00
/* these shouldn't be called directly.
 * objspace_* functions do not check allocation size.
 */

/* Raw xmalloc: size adjustment, GC-on-failure retry, and accounting.
 * Size validity must be checked by the caller (e.g. ruby_xmalloc_body). */
static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    return objspace_malloc_fixup(objspace, mem, size);
}

/* Compute count * elsize, raising ArgError on overflow (bounded by
 * SSIZE_MAX, since negative-looking sizes are rejected elsewhere). */
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    size_t ret;
    if (rb_mul_size_overflow(count, elsize, SSIZE_MAX, &ret)) {
        ruby_malloc_size_overflow(count, elsize);
    }
    return ret;
}
2012-08-05 14:39:37 +04:00
/* Raw xrealloc with accounting.
 * - NULL ptr degenerates to a plain allocation.
 * - new_size == 0 degenerates to free and returns NULL (see comment below
 *   on why realloc(ptr, 0) is avoided).
 * - Under CALC_EXACT_MALLOC_SIZE, the hidden info header is stepped back
 *   over before calling realloc and re-written afterwards.
 * OLD_SIZE is a hint; the true usable sizes are looked up when possible. */
static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    void *mem;

    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    /*
     * The behavior of realloc(ptr, 0) is implementation defined.
     * Therefore we don't use realloc(ptr, 0) for portability reason.
     * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
     */
    if (new_size == 0) {
        objspace_xfree(objspace, ptr, old_size);
        return 0;
    }

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
        new_size += sizeof(struct malloc_obj_info);
        ptr = info;
        old_size = info->size;   /* recorded size supersedes the hint */
    }
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);
    TRY_WITH_GC(mem = realloc(ptr, new_size));
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = new_size;
        mem = info + 1;
    }
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);
    return mem;
}
1999-01-20 07:59:39 +03:00
2018-09-25 21:13:29 +03:00
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS

/* Histogram buckets for malloc lifetime (in GC generations) and size
 * (powers of two starting at 16); the last bucket collects the overflow. */
#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE + 1];
static st_table *malloc_info_file_table;

/* Print one "file \t count \t total-bytes" row of the per-file table. */
static int
mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
{
    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);

    return ST_CONTINUE;
}

/* Dump the collected malloc statistics to stderr at process exit
 * (GCC/Clang destructor attribute). */
__attribute__((destructor))
void
rb_malloc_info_show_results(void)
{
    int i;

    fprintf(stderr, "* malloc_info gen statistics\n");
    for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
        if (i == MALLOC_INFO_GEN_SIZE - 1) {
            fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
        }
        else {
            fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
        }
    }

    fprintf(stderr, "* malloc_info size statistics\n");
    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
        int s = 16 << i;
        fprintf(stderr, "%d\t%d\n", (int)s, (int)malloc_info_size[i]);
    }
    fprintf(stderr, "more\t%d\n", (int)malloc_info_size[i]);

    if (malloc_info_file_table) {
        fprintf(stderr, "* malloc_info file statistics\n");
        st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
    }
}
#else
/* Stub kept so callers can link unconditionally. */
void
rb_malloc_info_show_results(void)
{
}
#endif
2012-08-05 14:39:37 +04:00
/* Raw xfree with accounting.  Under CALC_EXACT_MALLOC_SIZE the hidden info
 * header is recovered to get the exact size and, with detail tracking on,
 * to update the lifetime/size/per-file histograms dumped at exit. */
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    {
        int gen = (int)(objspace->profile.count - info->gen);  /* lifetime in GC runs */
        int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE - 1 : gen;
        int i;

        malloc_info_gen_cnt[gen_index]++;
        malloc_info_gen_size[gen_index] += info->size;

        /* bucket by size: 16, 32, ... then overflow bucket */
        for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
            size_t s = 16 << i;
            if (info->size <= s) {
                malloc_info_size[i]++;
                goto found;
            }
        }
        malloc_info_size[i]++;
      found:;

        {
            st_data_t key = (st_data_t)info->file;
            size_t *data;

            if (malloc_info_file_table == NULL) {
                malloc_info_file_table = st_init_numtable_with_size(1024);
            }
            if (st_lookup(malloc_info_file_table, key, (st_data_t *)&data)) {
                /* hit */
            }
            else {
                /* plain malloc: this table must survive the GC accounting */
                data = malloc(sizeof(size_t) * 2);
                if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
                data[0] = data[1] = 0;
                st_insert(malloc_info_file_table, key, (st_data_t)data);
            }
            data[0]++;              /* frees from this site */
            data[1] += info->size;  /* bytes freed from this site */
        };
#if 0 /* verbose output */
        if (gen >= 2) {
            if (info->file) {
                fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
            }
            else {
                fprintf(stderr, "free - size:%d, gen:%d\n", (int)info->size, gen);
            }
        }
#endif
    }
#endif
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);

    free(ptr);
    RB_DEBUG_COUNTER_INC(heap_xfree);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
}
2006-03-02 08:22:30 +03:00
2016-04-21 23:59:40 +03:00
/* Convenience wrapper binding objspace_xmalloc0 to the global objspace. */
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

/* Public xmalloc entry: rejects sizes that look negative as ssize_t
 * (i.e. > SSIZE_MAX) before delegating to the unchecked allocator. */
void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }
    return ruby_xmalloc0(size);
}

/* Raise ArgError for a count*elsize multiplication overflow. */
void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

/* xmalloc2: allocate n*size bytes with overflow checking. */
void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}
2012-08-05 14:39:37 +04:00
/* Raw xcalloc: like objspace_xmalloc0 but with zero-filled memory. */
static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = calloc(1, size));
    return objspace_malloc_fixup(objspace, mem, size);
}

/* Public xcalloc entry: n*size with overflow checking, zero-filled. */
void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}
2007-11-03 18:09:10 +03:00
2013-11-25 05:13:31 +04:00
/* The sized variants may be shadowed by macros when the compiler can see
 * the old size statically; undef so we define the real functions here. */
#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif

/* xrealloc with an old-size hint (0 = unknown), used to keep the malloc
 * accounting exact.  Rejects sizes above SSIZE_MAX. */
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

/* Public xrealloc entry; old size unknown. */
void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif

/* xrealloc2 with an old element-count hint: reallocate to n*size bytes.
 * Overflow is detected post-multiply, which is well-defined for size_t. */
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
        rb_raise(rb_eArgError, "realloc: possible integer overflow");
    }
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

/* Public xrealloc2 entry; old count unknown. */
void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}
2007-11-03 18:09:10 +03:00
2013-11-25 05:13:31 +04:00
#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif

/* xfree with a size hint (0 = unknown) for exact accounting.
 * NULL is accepted and ignored. */
void
ruby_sized_xfree(void *x, size_t size)
{
    if (x) {
        objspace_xfree(&rb_objspace, x, size);
    }
}

/* Public xfree entry; size unknown. */
void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}
2012-08-05 14:39:37 +04:00
/* Mimic ruby_xmalloc, but need not rb_objspace.
 * should return pointer suitable for ruby_xfree
 *
 * Returns NULL on allocation failure (no GC retry is possible here since
 * the objspace may not exist yet).
 */
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
    /* Fix: the result was previously used unchecked; with
     * CALC_EXACT_MALLOC_SIZE enabled a failed malloc was dereferenced
     * when initializing the info header below. */
    if (!mem) return NULL;
#if CALC_EXACT_MALLOC_SIZE
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = 0;
        info->file = NULL;
        info->line = 0;
#else
        info->file = NULL;
#endif
        mem = info + 1;   /* hide the header, as objspace_malloc_fixup does */
    }
#endif
    return mem;
}
2013-11-22 05:38:08 +04:00
/* Free a pointer obtained from ruby_mimmalloc, stepping back over the
 * hidden info header when exact-size accounting is enabled. */
void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
#endif
    free(ptr);
}
2015-08-06 04:56:37 +03:00
/* Allocate a GC-tracked temporary buffer of SIZE bytes holding CNT VALUEs,
 * anchored by the imemo written to *STORE so it survives GC while in use. */
void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    VALUE imemo;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep the order; allocate an empty imemo first then xmalloc, to
     * get rid of potential memory leak */
    imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
    *store = imemo;
    ptr = ruby_xmalloc0(size);
    tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;
    return ptr;
}

/* Byte-length variant: validates LEN and derives the VALUE count. */
void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

/* Release a temporary buffer early.  Atomic exchanges clear *store and the
 * imemo's pointer first so a concurrent GC mark cannot see freed memory. */
void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
2013-12-05 08:54:20 +04:00
#if MALLOC_ALLOCATED_SIZE
/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 *
 *  Returns the size of memory allocated by malloc().
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}

/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 *
 *  Returns the number of malloc() allocations.
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */
static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
#endif

/* Let extensions report externally-allocated memory so the GC trigger
 * heuristics account for it; DIFF may be positive or negative.
 * Recorded as a REALLOC so the allocation count is unaffected. */
void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
2012-08-05 14:39:37 +04:00
/*
  ------------------------------ WeakMap ------------------------------
*/

/* Internal state of an ObjectSpace::WeakMap.
 * obj2wmap maps a referenced object to the xmalloc'ed VALUE array of keys
 * pointing at it (element 0 is the count); wmap2obj maps each key back to
 * its object; final is the bound #finalize method installed as finalizer. */
struct weakmap {
    st_table *obj2wmap;		/* obj -> [ref,...] */
    st_table *wmap2obj;		/* ref -> obj */
    VALUE final;
};

#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0

#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
/* Optional mark-time sweep: drop entries whose object is already dead. */
static int
wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (!is_live_object(objspace, obj)) return ST_DELETE;
    return ST_CONTINUE;
}
#endif

/* GC mark function: only the finalizer proc is strongly referenced;
 * keys and values are deliberately NOT marked (that is the weakness). */
static void
wmap_mark(void *ptr)
{
    struct weakmap *w = ptr;
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
    if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
#endif
    rb_gc_mark(w->final);
}
/* Free one obj2wmap entry: val is a VALUE array whose slot 0 holds the
 * number of following elements. */
static int
wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
    return ST_CONTINUE;
}

/* GC free function: release every key array and both hash tables. */
static void
wmap_free(void *ptr)
{
    struct weakmap *w = ptr;
    st_foreach(w->obj2wmap, wmap_free_map, 0);
    st_free_table(w->obj2wmap);
    st_free_table(w->wmap2obj);
}

/* Accumulate the size of one key array into *arg. */
static int
wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
    return ST_CONTINUE;
}

/* GC memsize function: struct + both tables + all key arrays. */
static size_t
wmap_memsize(const void *ptr)
{
    size_t size;
    const struct weakmap *w = ptr;
    size = sizeof(*w);
    size += st_memsize(w->obj2wmap);
    size += st_memsize(w->wmap2obj);
    st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
    return size;
}

static const rb_data_type_t weakmap_type = {
    "weakmap",
    {
	wmap_mark,
	wmap_free,
	wmap_memsize,
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
/* Allocator for ObjectSpace::WeakMap: fresh tables plus the bound
 * #finalize method used as the finalizer for keys and values. */
static VALUE
wmap_allocate(VALUE klass)
{
    struct weakmap *w;
    VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
    w->obj2wmap = st_init_numtable();
    w->wmap2obj = st_init_numtable();
    w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
    return obj;
}

/* st_update callback: remove one weak-ref key (arg) from an obj2wmap
 * value array.  Deletes the entry when no keys remain; shrinks the array
 * in place when some were removed. */
static int
wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    VALUE wmap, *ptr, size, i, j;
    if (!existing) return ST_STOP;
    wmap = (VALUE)arg, ptr = (VALUE *)*value;
    /* compact entries 1..size, skipping the one equal to wmap */
    for (i = j = 1, size = ptr[0]; i <= size; ++i) {
        if (ptr[i] != wmap) {
            ptr[j++] = ptr[i];
        }
    }
    if (j == 1) {
        ruby_sized_xfree(ptr, i * sizeof(VALUE));
        return ST_DELETE;
    }
    if (j < i) {
        /* NOTE(review): kept entries occupy slots 1..j-1, so recording j
         * in ptr[0] looks one too large — verify against upstream. */
        ptr = ruby_sized_xrealloc2(ptr, j + 1, sizeof(VALUE), i);
        ptr[0] = j;
        *value = (st_data_t)ptr;
    }
    return ST_CONTINUE;
}
2018-02-23 05:16:42 +03:00
/* :nodoc: */
/* Finalizer entry point, invoked with the object id of a dying object.
 * The object may have been a value (forward case: drop every key that
 * pointed at it) and/or a key (reverse case: prune it from the value's
 * key array via wmap_final_func). */
static VALUE
wmap_finalize(VALUE self, VALUE objid)
{
    st_data_t orig, wmap, data;
    VALUE obj, *rids, i, size;
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    /* Get reference from object id. */
    obj = obj_id_to_ref(objid);

    /* obj is original referenced object and/or weak reference. */
    orig = (st_data_t)obj;
    if (st_delete(w->obj2wmap, &orig, &data)) {
        rids = (VALUE *)data;
        size = *rids++;          /* slot 0 = count, keys follow */
        for (i = 0; i < size; ++i) {
            wmap = (st_data_t)rids[i];
            st_delete(w->wmap2obj, &wmap, NULL);
        }
        ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
    }

    wmap = (st_data_t)obj;
    if (st_delete(w->wmap2obj, &wmap, &orig)) {
        wmap = (st_data_t)obj;
        st_update(w->obj2wmap, orig, wmap_final_func, wmap);
    }
    return self;
}
2013-10-18 10:59:14 +04:00
/* Carries the objspace pointer plus an accumulator through st_foreach. */
struct wmap_iter_arg {
    rb_objspace_t *objspace;
    VALUE value;
};

/* Append one "key => value" pair to the inspect string.  The first byte of
 * the string doubles as state: '-' until the first pair is written, '#'
 * afterwards, selecting ": " vs ", " as the separator. */
static int
wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE str = (VALUE)arg;
    VALUE k = (VALUE)key, v = (VALUE)val;

    if (RSTRING_PTR(str)[0] == '#') {
        rb_str_cat2(str, ", ");
    }
    else {
        rb_str_cat2(str, ": ");
        RSTRING_PTR(str)[0] = '#';
    }
    k = SPECIAL_CONST_P(k) ? rb_inspect(k) : rb_any_to_s(k);
    rb_str_append(str, k);
    rb_str_cat2(str, " => ");
    v = SPECIAL_CONST_P(v) ? rb_inspect(v) : rb_any_to_s(v);
    rb_str_append(str, v);
    OBJ_INFECT(str, k);
    OBJ_INFECT(str, v);

    return ST_CONTINUE;
}

/* WeakMap#inspect: "#<ClassName:0x... key => val, ...>". */
static VALUE
wmap_inspect(VALUE self)
{
    VALUE str;
    VALUE c = rb_class_name(CLASS_OF(self));
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
    if (w->wmap2obj) {
        st_foreach(w->wmap2obj, wmap_inspect_i, str);
    }
    RSTRING_PTR(str)[0] = '#';   /* replace the '-' placeholder */
    rb_str_cat2(str, ">");
    return str;
}
/* Yield [key, obj] for one entry if the object is still alive. */
static int
wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
	rb_yield_values(2, (VALUE)key, obj);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
    return self;
}

/* Yield the key for one entry if the object is still alive. */
static int
wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
	rb_yield((VALUE)key);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each_key(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
    return self;
}

/* Yield the object for one entry if it is still alive. */
static int
wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
	rb_yield(obj);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each_value(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
    return self;
}

/* Collect the key of one live entry into the result array. */
static int
wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE ary = argp->value;
    VALUE obj = (VALUE)val;
    if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
	rb_ary_push(ary, (VALUE)key);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_keys(VALUE self)
{
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    args.objspace = &rb_objspace;
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
    return args.value;
}

/* Collect the object of one live entry into the result array. */
static int
wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE ary = argp->value;
    VALUE obj = (VALUE)val;
    if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
	rb_ary_push(ary, obj);
    }
    return ST_CONTINUE;
}

/* Iterates over values and objects in a weakly referenced object */
static VALUE
wmap_values(VALUE self)
{
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    args.objspace = &rb_objspace;
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
    return args.value;
}
2013-12-10 11:16:08 +04:00
/* st_update callback for WeakMap#[]=: append the new key (arg) to the
 * value's key array, growing it by one slot, or create a fresh 2-slot
 * array ([count=1, key]) for a first insertion.  Returns ST_STOP when the
 * array was grown in place (no table update needed). */
static int
wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
{
    VALUE size, *ptr, *optr;
    if (existing) {
        size = (ptr = optr = (VALUE *)*val)[0];
        ++size;
        ptr = ruby_sized_xrealloc2(ptr, size + 1, sizeof(VALUE), size);
    }
    else {
        optr = 0;
        size = 1;
        ptr = ruby_xmalloc0(2 * sizeof(VALUE));
    }
    ptr[0] = size;            /* slot 0 holds the entry count */
    ptr[size] = (VALUE)arg;
    if (ptr == optr) return ST_STOP;   /* realloc kept the address */
    *val = (st_data_t)ptr;
    return ST_CONTINUE;
}
2012-11-29 12:15:53 +04:00
/* Creates a weak reference from the given key to the given value */
/* Both key and value must be finalizable; the map's finalize method is
 * registered on each so dead entries are pruned automatically. */
static VALUE
wmap_aset(VALUE self, VALUE wmap, VALUE orig)
{
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    should_be_finalizable(orig);
    should_be_finalizable(wmap);
    define_final0(orig, w->final);
    define_final0(wmap, w->final);
    st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
    st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
    return nonspecial_obj_id(orig);
}
2012-11-29 12:15:53 +04:00
/* Retrieves a weakly referenced object with the given key */
2012-03-13 07:37:06 +04:00
static VALUE
wmap_aref ( VALUE self , VALUE wmap )
{
st_data_t data ;
VALUE obj ;
struct weakmap * w ;
rb_objspace_t * objspace = & rb_objspace ;
TypedData_Get_Struct ( self , struct weakmap , & weakmap_type , w ) ;
if ( ! st_lookup ( w - > wmap2obj , ( st_data_t ) wmap , & data ) ) return Qnil ;
obj = ( VALUE ) data ;
if ( ! is_id_value ( objspace , obj ) ) return Qnil ;
if ( ! is_live_object ( objspace , obj ) ) return Qnil ;
return obj ;
}
2013-10-18 10:59:12 +04:00
/* Returns +true+ if +key+ is registered (and its referent is still live). */
static VALUE
wmap_has_key(VALUE self, VALUE key)
{
    if (NIL_P(wmap_aref(self, key))) {
        return Qfalse;
    }
    return Qtrue;
}
2018-02-23 05:18:52 +03:00
/* Returns the number of referenced objects */
2013-12-09 11:13:40 +04:00
static VALUE
wmap_size ( VALUE self )
{
struct weakmap * w ;
st_index_t n ;
TypedData_Get_Struct ( self , struct weakmap , & weakmap_type , w ) ;
n = w - > wmap2obj - > num_entries ;
# if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
return ULONG2NUM ( n ) ;
# else
return ULL2NUM ( n ) ;
# endif
}
2008-06-08 14:27:06 +04:00
2012-08-04 18:12:12 +04:00
/*
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - GC profiler - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*/
2012-11-21 17:15:10 +04:00
# define GC_PROFILE_RECORD_DEFAULT_SIZE 100
2012-08-04 18:12:12 +04:00
2014-09-08 08:11:00 +04:00
/* return sec in user time.
 * Tries, in order: clock_gettime(CLOCK_PROCESS_CPUTIME_ID),
 * getrusage(RUSAGE_SELF), then GetProcessTimes() on Windows; returns
 * 0.0 if no mechanism is available. */
static double
getrusage_time(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        /* remember a first-call failure so we do not retry every time */
        static int try_clock_gettime = 1;
        struct timespec ts;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
            return ts.tv_sec + ts.tv_nsec * 1e-9;
        }
        else {
            try_clock_gettime = 0;
        }
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            return time.tv_sec + time.tv_usec * 1e-6;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;
        LONG_LONG q;
        double t;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            /* FILETIME is in 100ns units; convert to microseconds */
            memcpy(&ui, &user_time, sizeof(FILETIME));
            q = ui.QuadPart / 10L;
            t = (DWORD)(q % 1000000L) * 1e-6;
            q /= 1000000L;
#ifdef __GNUC__
            t += q;
#else
            /* avoid LONG_LONG-to-double conversion on compilers lacking it */
            t += (double)(DWORD)(q >> 16) * (1 << 16);
            t += (DWORD)q & ~(~0 << 16);
#endif
            return t;
        }
    }
#endif

    return 0.0;
}
/* Allocates (growing the record buffer as needed) and zero-initializes
 * the profile record for the GC run that is about to start, then fills
 * in the before-GC fields (reason flags, allocation size, rusage).
 * No-op unless profiling is enabled. */
static inline void
gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
{
    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;
        gc_profile_record *record;

        /* create new record */
        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            /* first use: allocate the initial buffer (checked below) */
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(sizeof(gc_profile_record) * objspace->profile.size);
        }
        if (index >= objspace->profile.size) {
            /* grow in fixed 1000-record steps */
            void *ptr;
            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
            if (!ptr) rb_memerror();
            objspace->profile.records = ptr;
        }
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        }
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        MEMZERO(record, gc_profile_record, 1);

        /* setup before-GC parameter */
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
#ifdef RUSAGE_SELF
        {
            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;
            }
        }
#endif
#endif
    }
}
2013-05-13 20:34:25 +04:00
2013-06-24 02:58:01 +04:00
/* Stamps the current record with the GC invoke time and resets the
 * accumulated GC time; called at the start of a GC run. */
static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}
2013-06-20 00:43:33 +04:00
/* Returns seconds of user time elapsed since +time+, clamped at zero
 * in case the clock source is non-monotonic. */
static double
elapsed_time_from(double time)
{
    double now = getrusage_time();
    return (now > time) ? now - time : 0;
}
2012-08-04 18:12:12 +04:00
/* Finalizes timing for the current record: total GC duration, and
 * invoke time made relative to when profiling was enabled. */
static inline void
gc_prof_timer_stop(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        /* report invoke time relative to profiler start, not process start */
        record->gc_invoke_time -= objspace->profile.invoke_time;
    }
}
2015-10-31 04:02:29 +03:00
# define RUBY_DTRACE_GC_HOOK(name) \
do { if ( RUBY_DTRACE_GC_ # # name # # _ENABLED ( ) ) RUBY_DTRACE_GC_ # # name ( ) ; } while ( 0 )
2012-08-04 18:12:12 +04:00
/* Fires the DTrace mark-begin probe and, in detail mode, stamps the
 * mark-phase start time into the current record. */
static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}
/* Fires the DTrace mark-end probe and, in detail mode, converts the
 * stored mark start stamp into an elapsed duration. */
static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        /* gc_mark_time held the start stamp; replace with the duration */
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}
/* Fires the DTrace sweep-begin probe and records the sweep start time
 * (only when a GC time is being accumulated, or in detail mode). */
static inline void
gc_prof_sweep_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();
        }
    }
}
/* Fires the DTrace sweep-end probe and accumulates sweep time into the
 * current record.  Sweeping can be lazy, so this may run several times
 * per GC and must add (not set) the elapsed time. */
static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {
        double sweep_time;
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            /* need to accumulate GC time for lazy sweep after gc() */
            record->gc_time += sweep_time;
        }
        else if (GC_PROFILE_MORE_DETAIL) {
            /* sweep_time is only consumed below when MORE_DETAIL is on,
             * so this branch keeps it initialized for that case */
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        }

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
#endif
        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
    }
}
/* In detail mode, snapshots the malloc growth counters into the
 * current record; compiled to a no-op otherwise. */
static inline void
gc_prof_set_malloc_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;
    }
#endif
}
/* Fills the current record with heap occupancy figures derived from
 * the at-GC-start counters (live objects, page capacity, byte sizes). */
static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        /* live = allocated-so-far minus freed-so-far, as of GC start */
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
        /* total object slots across all heap pages in use at GC start */
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
#endif

        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
    }
}
/*
* call - seq :
* GC : : Profiler . clear - > nil
*
* Clears the GC profiler data .
*
*/
static VALUE
gc_profile_clear ( void )
{
rb_objspace_t * objspace = & rb_objspace ;
2012-11-21 17:15:10 +04:00
if ( GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace - > profile . size ) {
objspace - > profile . size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2 ;
2013-06-22 01:51:41 +04:00
objspace - > profile . records = realloc ( objspace - > profile . records , sizeof ( gc_profile_record ) * objspace - > profile . size ) ;
if ( ! objspace - > profile . records ) {
2012-11-21 17:15:10 +04:00
rb_memerror ( ) ;
}
}
2013-06-22 01:51:41 +04:00
MEMZERO ( objspace - > profile . records , gc_profile_record , objspace - > profile . size ) ;
2013-05-13 20:34:25 +04:00
objspace - > profile . next_index = 0 ;
2013-06-22 01:51:41 +04:00
objspace - > profile . current_record = 0 ;
2012-08-04 18:12:12 +04:00
return Qnil ;
}
2011-09-08 07:57:41 +04:00
/*
* call - seq :
2012-11-29 12:15:53 +04:00
* GC : : Profiler . raw_data - > [ Hash , . . . ]
2011-09-08 07:57:41 +04:00
*
* Returns an Array of individual raw profile data Hashes ordered
2012-11-29 12:15:53 +04:00
* from earliest to latest by + : GC_INVOKE_TIME + .
*
* For example :
2011-09-08 07:57:41 +04:00
*
2012-11-29 12:15:53 +04:00
* [
* {
* : GC_TIME = > 1.3000000000000858e-05 ,
* : GC_INVOKE_TIME = > 0.010634999999999999 ,
* : HEAP_USE_SIZE = > 289640 ,
* : HEAP_TOTAL_SIZE = > 588960 ,
* : HEAP_TOTAL_OBJECTS = > 14724 ,
* : GC_IS_MARKED = > false
* } ,
* # . . .
2011-09-08 07:57:41 +04:00
* ]
*
* The keys mean :
*
2012-11-29 12:15:53 +04:00
* + : GC_TIME + : :
* Time elapsed in seconds for this GC run
* + : GC_INVOKE_TIME + : :
* Time elapsed in seconds from startup to when the GC was invoked
* + : HEAP_USE_SIZE + : :
* Total bytes of heap used
* + : HEAP_TOTAL_SIZE + : :
* Total size of heap in bytes
* + : HEAP_TOTAL_OBJECTS + : :
* Total number of objects
* + : GC_IS_MARKED + : :
* Returns + true + if the GC is in mark phase
*
* If ruby was built with + GC_PROFILE_MORE_DETAIL + , you will also have access
* to the following hash keys :
*
* + : GC_MARK_TIME + : :
* + : GC_SWEEP_TIME + : :
* + : ALLOCATE_INCREASE + : :
* + : ALLOCATE_LIMIT + : :
2013-10-18 10:33:36 +04:00
* + : HEAP_USE_PAGES + : :
2012-11-29 12:15:53 +04:00
* + : HEAP_LIVE_OBJECTS + : :
* + : HEAP_FREE_OBJECTS + : :
* + : HAVE_FINALIZE + : :
2011-09-08 07:57:41 +04:00
*
*/
2008-09-04 14:47:39 +04:00
/* Implementation of GC::Profiler.raw_data: builds an Array of Hashes,
 * one per recorded GC run, with timing and heap statistics.  Returns
 * nil when profiling is disabled.  Extra keys appear when built with
 * GC_PROFILE_MORE_DETAIL or RGENGC_PROFILE. */
static VALUE
gc_profile_record_get(void)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i = 0; i < objspace->profile.next_index; i++) {
        gc_profile_record *record = &objspace->profile.records[i];

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        /* hardcoded true: kept for API compatibility with older rubies */
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
#endif

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
2013-11-24 23:49:02 +04:00
# if GC_PROFILE_MORE_DETAIL
2013-12-18 18:34:23 +04:00
# define MAJOR_REASON_MAX 0x10
static char *
gc_profile_dump_major_reason ( int flags , char * buff )
{
int reason = flags & GPR_FLAG_MAJOR_MASK ;
int i = 0 ;
if ( reason = = GPR_FLAG_NONE ) {
buff [ 0 ] = ' - ' ;
buff [ 1 ] = 0 ;
}
else {
# define C(x, s) \
if ( reason & GPR_FLAG_MAJOR_BY_ # # x ) { \
buff [ i + + ] = # x [ 0 ] ; \
if ( i > = MAJOR_REASON_MAX ) rb_bug ( " gc_profile_dump_major_reason: overflow " ) ; \
buff [ i ] = 0 ; \
}
C ( NOFREE , N ) ;
C ( OLDGEN , O ) ;
C ( SHADY , S ) ;
2013-11-24 23:49:02 +04:00
# if RGENGC_ESTIMATE_OLDMALLOC
2013-12-18 18:34:23 +04:00
C ( OLDMALLOC , M ) ;
2013-11-24 23:49:02 +04:00
# endif
# undef C
}
2013-12-18 18:34:23 +04:00
return buff ;
2013-11-24 23:49:02 +04:00
}
# endif
2012-10-27 18:36:22 +04:00
/* Shared worker for GC::Profiler.result and .report: formats every
 * recorded GC run as a text table and emits it through +append+
 * (rb_str_buf_append for result, rb_io_write for report).  A second,
 * wider table follows when built with GC_PROFILE_MORE_DETAIL. */
static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count /* > 1 */) {
        size_t i;
        const gc_profile_record *record;

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        /* second table: per-run flags, allocation and phase timings */
        append(out, rb_str_new_cstr("\n\n" \
                                    "More detail.\n" \
                                    "Prepare Time = Previously GC's rest sweep time\n"
                                    "Index Flags          Allocate Inc.  Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
                                    "  Allocated Size"
#endif
                                    "  Use Page     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
#if RGENGC_PROFILE
                                    " OldgenObj RemNormObj RemShadObj"
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                    " MaxRSS(KB) MinorFLT MajorFLT"
#endif
                                    "\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %15"PRIuSIZE
#endif
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if RGENGC_PROFILE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   "%11ld %8ld %8ld"
#endif
                                   "\n",
                                   i+1,
                                   /* e.g. "NO" for NOFREE+OLDGEN major GC */
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,

                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
#if RGENGC_PROFILE
                                   ,
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   ,
                                   record->maxrss / 1024,
                                   record->minflt,
                                   record->majflt
#endif

                                   ));
        }
#endif
    }
}
2012-10-27 18:36:22 +04:00
/*
 *  call-seq:
 *     GC::Profiler.result  -> String
 *
 *  Returns a profile data report such as:
 *
 *    GC 1 invokes.
 *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC time(ms)
 *        1               0.012               159240               212940                10647         0.00000000000001530000
 */
static VALUE
gc_profile_result(void)
{
    VALUE report = rb_str_buf_new(0);

    /* render into a string buffer rather than an IO */
    gc_profile_dump_on(report, rb_str_buf_append);
    return report;
}
2008-08-11 13:36:57 +04:00
/*
* call - seq :
* GC : : Profiler . report
2012-11-29 12:15:53 +04:00
* GC : : Profiler . report ( io )
2008-08-11 13:36:57 +04:00
*
2012-11-29 12:15:53 +04:00
* Writes the GC : : Profiler . result to < tt > $ stdout < / tt > or the given IO object .
2009-02-22 17:23:33 +03:00
*
2008-08-11 13:36:57 +04:00
*/
2008-09-04 14:47:39 +04:00
static VALUE
2008-08-11 13:36:57 +04:00
gc_profile_report ( int argc , VALUE * argv , VALUE self )
{
VALUE out ;
2018-12-06 10:49:24 +03:00
out = ( ! rb_check_arity ( argc , 0 , 1 ) ? rb_stdout : argv [ 0 ] ) ;
2012-10-27 18:36:22 +04:00
gc_profile_dump_on ( out , rb_io_write ) ;
2008-08-11 13:36:57 +04:00
return Qnil ;
}
2010-03-04 07:51:43 +03:00
/*
 *  call-seq:
 *     GC::Profiler.total_time -> float
 *
 *  The total time used for garbage collection in seconds
 */
static VALUE
gc_profile_total_time(VALUE self)
{
    double total = 0;
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run) {
        size_t i, n = objspace->profile.next_index;

        /* sum the per-run GC durations over every recorded run */
        for (i = 0; i < n; i++) {
            total += objspace->profile.records[i].gc_time;
        }
    }
    return DBL2NUM(total);
}
2012-08-05 14:39:37 +04:00
/*
 *  call-seq:
 *     GC::Profiler.enabled?  -> true or false
 *
 *  The current status of GC profile mode.
 */
static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run) {
        return Qtrue;
    }
    return Qfalse;
}
/*
* call - seq :
2012-11-29 12:15:53 +04:00
* GC : : Profiler . enable - > nil
2012-08-05 14:39:37 +04:00
*
* Starts the GC profiler .
*
*/
static VALUE
gc_profile_enable ( void )
{
rb_objspace_t * objspace = & rb_objspace ;
objspace - > profile . run = TRUE ;
2013-12-18 11:58:04 +04:00
objspace - > profile . current_record = 0 ;
2012-08-05 14:39:37 +04:00
return Qnil ;
}
/*
* call - seq :
2012-11-29 12:15:53 +04:00
* GC : : Profiler . disable - > nil
2012-08-05 14:39:37 +04:00
*
* Stops the GC profiler .
*
*/
static VALUE
gc_profile_disable ( void )
{
rb_objspace_t * objspace = & rb_objspace ;
objspace - > profile . run = FALSE ;
2013-06-22 01:51:41 +04:00
objspace - > profile . current_record = 0 ;
2012-08-05 14:39:37 +04:00
return Qnil ;
}
2012-11-22 19:03:46 +04:00
/*
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - DEBUG - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*/
2013-07-18 03:19:38 +04:00
/* Maps an internal object type tag to its printable name.  T_DATA
 * objects report their typed-data name when one is registered.
 * Returns "unknown" for unrecognized tags. */
static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
      case T_NONE:     return "T_NONE";
      case T_OBJECT:   return "T_OBJECT";
      case T_CLASS:    return "T_CLASS";
      case T_MODULE:   return "T_MODULE";
      case T_FLOAT:    return "T_FLOAT";
      case T_STRING:   return "T_STRING";
      case T_REGEXP:   return "T_REGEXP";
      case T_ARRAY:    return "T_ARRAY";
      case T_HASH:     return "T_HASH";
      case T_STRUCT:   return "T_STRUCT";
      case T_BIGNUM:   return "T_BIGNUM";
      case T_FILE:     return "T_FILE";
      case T_MATCH:    return "T_MATCH";
      case T_COMPLEX:  return "T_COMPLEX";
      case T_RATIONAL: return "T_RATIONAL";
      case T_NIL:      return "T_NIL";
      case T_TRUE:     return "T_TRUE";
      case T_FALSE:    return "T_FALSE";
      case T_SYMBOL:   return "T_SYMBOL";
      case T_FIXNUM:   return "T_FIXNUM";
      case T_UNDEF:    return "T_UNDEF";
      case T_IMEMO:    return "T_IMEMO";
      case T_ICLASS:   return "T_ICLASS";
      case T_ZOMBIE:   return "T_ZOMBIE";
      case T_DATA:
        if (obj && rb_objspace_data_type_name(obj)) {
            return rb_objspace_data_type_name(obj);
        }
        return "T_DATA";
    }
    return "unknown";
}
/* Convenience wrapper: printable type name of +obj+ itself. */
static const char *
obj_type_name(VALUE obj)
{
    int t = TYPE(obj);
    return type_name(t, obj);
}
2015-06-25 01:10:13 +03:00
/* Maps a method definition type to its printable name for debug
 * output.  Covers every rb_method_type_t value; falls through to
 * rb_bug() on an out-of-range tag. */
static const char *
method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrset"; /* was misspelled "attrest" */
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("method_type_name: unreachable (type: %d)", type);
}
2014-09-08 08:11:00 +04:00
/* from array.c */
# define ARY_SHARED_P(ary) \
2017-06-22 08:03:18 +03:00
( GC_ASSERT ( ! FL_TEST ( ( ary ) , ELTS_SHARED ) | | ! FL_TEST ( ( ary ) , RARRAY_EMBED_FLAG ) ) , \
2014-09-08 08:11:00 +04:00
FL_TEST ( ( ary ) , ELTS_SHARED ) ! = 0 )
# define ARY_EMBED_P(ary) \
2017-06-22 08:03:18 +03:00
( GC_ASSERT ( ! FL_TEST ( ( ary ) , ELTS_SHARED ) | | ! FL_TEST ( ( ary ) , RARRAY_EMBED_FLAG ) ) , \
2014-09-08 08:11:00 +04:00
FL_TEST ( ( ary ) , RARRAY_EMBED_FLAG ) ! = 0 )
2016-07-26 13:07:12 +03:00
static void
rb_raw_iseq_info ( char * buff , const int buff_size , const rb_iseq_t * iseq )
{
fix SEGV touching uninitialized memory
This function can be called from Init_VM().
No assumption can be made about object internals.
(lldb) run
Process 15734 launched: './miniruby' (x86_64)
Process 15734 stopped
* thread #1: tid = 0x1441d4, 0x00000001000bdfcb miniruby`rb_raw_iseq_info(buff="0x0000000100f61f48 [0 ] T_IMEMO iseq", buff_size=256, iseq=0x0000000100f61f48) + 27 at gc.c:9273, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x50)
frame #0: 0x00000001000bdfcb miniruby`rb_raw_iseq_info(buff="0x0000000100f61f48 [0 ] T_IMEMO iseq", buff_size=256, iseq=0x0000000100f61f48) + 27 at gc.c:9273
9270 static void
9271 rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
9272 {
-> 9273 if (iseq->body->location.label) {
9274 VALUE path = rb_iseq_path(iseq);
9275 snprintf(buff, buff_size, "%s %s@%s:%d", buff,
9276 RSTRING_PTR(iseq->body->location.label),
(lldb) p *iseq
(rb_iseq_t) $0 = {
flags = 28698
reserved1 = 0
body = 0x0000000000000000
aux = {
compile_data = 0x0000000000000000
loader = (obj = 0, index = 0)
trace_events = 0
}
}
(lldb) bt
* thread #1: tid = 0x1441d4, 0x00000001000bdfcb miniruby`rb_raw_iseq_info(buff="0x0000000100f61f48 [0 ] T_IMEMO iseq", buff_size=256, iseq=0x0000000100f61f48) + 27 at gc.c:9273, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x50)
* frame #0: 0x00000001000bdfcb miniruby`rb_raw_iseq_info(buff="0x0000000100f61f48 [0 ] T_IMEMO iseq", buff_size=256, iseq=0x0000000100f61f48) + 27 at gc.c:9273
frame #1: 0x00000001000bde72 miniruby`rb_raw_obj_info(buff="0x0000000100f61f48 [0 ] T_IMEMO iseq", buff_size=256, obj=4311097160) + 2786 at gc.c:9396
frame #2: 0x00000001000b7c5f miniruby`obj_info(obj=4311097160) + 95 at gc.c:9428
frame #3: 0x00000001000c16a8 miniruby`newobj_init(klass=0, flags=28698, v1=0, v2=0, v3=0, wb_protected=1, objspace=0x00000001007ee280, obj=4311097160) + 424 at gc.c:1887
frame #4: 0x00000001000b44c9 miniruby`newobj_of(klass=0, flags=28698, v1=0, v2=0, v3=0, wb_protected=1) + 217 at gc.c:1970
frame #5: 0x00000001000b464b miniruby`rb_imemo_new(type=imemo_iseq, v1=0, v2=0, v3=0, v0=0) + 75 at gc.c:2017
frame #6: 0x00000001000fd914 miniruby`iseq_imemo_alloc + 36 at iseq.h:156
frame #7: 0x00000001000f6e1d miniruby`iseq_alloc + 13 at iseq.c:211
frame #8: 0x00000001000f6bf8 miniruby`rb_iseq_new_with_opt(node=0x0000000000000000, name=4311097200, path=4311097200, realpath=8, first_lineno=1, parent=0x0000000000000000, type=ISEQ_TYPE_TOP, option=0x0000000100335c30) + 56 at iseq.c:519
frame #9: 0x00000001000f6bb6 miniruby`rb_iseq_new(node=0x0000000000000000, name=4311097200, path=4311097200, realpath=8, parent=0x0000000000000000, type=ISEQ_TYPE_TOP) + 86 at iseq.c:480
frame #10: 0x0000000100284bb0 miniruby`Init_VM + 1040 at vm.c:3022
frame #11: 0x00000001000d4f7d miniruby`rb_call_inits + 189 at inits.c:55
frame #12: 0x000000010009fe06 miniruby`ruby_setup + 198 at eval.c:61
frame #13: 0x000000010009fe5d miniruby`ruby_init + 13 at eval.c:78
frame #14: 0x00000001000009ed miniruby`main(argc=2, argv=0x00007fff5fbfdbf0) + 93 at main.c:41
frame #15: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61564 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:00 +03:00
if ( iseq - > body & & iseq - > body - > location . label ) {
2017-06-12 07:35:52 +03:00
VALUE path = rb_iseq_path ( iseq ) ;
fix SEGV touching uninitialized memory
This function can be called from rb_data_typed_object_zalloc().
No assumption can be made about object internals.
(lldb) run
Process 22135 launched: './miniruby' (x86_64)
Process 22135 stopped
* thread #1: tid = 0x14a3af, 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x18)
frame #0: 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364
1361 break;
1362 }
1363 #endif
-> 1364 return block->type;
1365 }
1366
1367 static inline void
(lldb) bt
* thread #1: tid = 0x14a3af, 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x18)
* frame #0: 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364
frame #1: 0x000000010008acdb miniruby`vm_block_iseq(block=0x0000000000000000) + 24 at vm_core.h:1399
frame #2: 0x000000010008acc1 miniruby`vm_proc_iseq(procval=4310866360) + 32 at vm_core.h:1387
frame #3: 0x000000010009cbed miniruby`rb_raw_obj_info(buff="0x0000000100f299b8 [0 ] proc (Proc)", buff_size=256, obj=4310866360) + 1513 at gc.c:9349
frame #4: 0x000000010009cf01 miniruby`obj_info(obj=4310866360) + 98 at gc.c:9428
frame #5: 0x000000010008ca1b miniruby`newobj_init(klass=4311027960, flags=12, v1=4298186080, v2=1, v3=0, wb_protected=32, objspace=0x00000001007cf280, obj=4310866360) + 338 at gc.c:1887
frame #6: 0x000000010008cce5 miniruby`newobj_of(klass=4311027960, flags=12, v1=4298186080, v2=1, v3=0, wb_protected=32) + 171 at gc.c:1970
frame #7: 0x000000010008d01d miniruby`rb_data_typed_object_wrap(klass=4311027960, datap=0x0000000000000000, type=0x0000000100311d60) + 133 at gc.c:2062
frame #8: 0x000000010008d04e miniruby`rb_data_typed_object_zalloc(klass=4311027960, size=40, type=0x0000000100311d60) + 42 at gc.c:2073
frame #9: 0x000000010011b459 miniruby`rb_proc_alloc(klass=4311027960) + 36 at proc.c:113
frame #10: 0x0000000100204d8e miniruby`vm_proc_create_from_captured(klass=4311027960, captured=0x00000001025003f8, block_type=block_type_iseq, is_from_method='\0', is_lambda='\x01') + 44 at vm.c:814
frame #11: 0x00000001002050d8 miniruby`rb_vm_make_proc_lambda(ec=0x00000001007cf548, captured=0x00000001025003f8, klass=4311027960, is_lambda='\x01') + 134 at vm.c:892
frame #12: 0x000000010011c0d2 miniruby`proc_new(klass=4311027960, is_lambda='\x01') + 445 at proc.c:752
frame #13: 0x000000010011c154 miniruby`rb_block_lambda + 27 at proc.c:808
frame #14: 0x00000001001ee7e3 miniruby`call_cfunc_0(func=(miniruby`rb_block_lambda at proc.c:807), recv=4310991600, argc=0, argv=0x0000000102400480) + 41 at vm_insnhelper.c:1729
frame #15: 0x00000001001ef2c3 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007cf548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 386 at vm_insnhelper.c:1918
frame #16: 0x00000001001ef412 miniruby`vm_call_cfunc(ec=0x00000001007cf548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 149 at vm_insnhelper.c:1934
frame #17: 0x00000001001f0655 miniruby`vm_call_method_each_type(ec=0x00000001007cf548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 239 at vm_insnhelper.c:2232
frame #18: 0x00000001001f0ce0 miniruby`vm_call_method(ec=0x00000001007cf548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 117 at vm_insnhelper.c:2355
frame #19: 0x00000001001f0eb6 miniruby`vm_call_general(ec=0x00000001007cf548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 59 at vm_insnhelper.c:2398
frame #20: 0x00000001001f6e61 miniruby`vm_exec_core(ec=0x00000001007cf548, initial=0) + 7480 at insns.def:850
frame #21: 0x0000000100207995 miniruby`vm_exec(ec=0x00000001007cf548) + 230 at vm.c:1771
frame #22: 0x0000000100208647 miniruby`rb_iseq_eval_main(iseq=0x0000000100f29fd0) + 52 at vm.c:2019
frame #23: 0x000000010007b750 miniruby`ruby_exec_internal(n=0x0000000100f29fd0) + 297 at eval.c:246
frame #24: 0x000000010007b876 miniruby`ruby_exec_node(n=0x0000000100f29fd0) + 36 at eval.c:310
frame #25: 0x000000010007b849 miniruby`ruby_run_node(n=0x0000000100f29fd0) + 62 at eval.c:302
frame #26: 0x0000000100000c05 miniruby`main(argc=2, argv=0x00007fff5fbfdbf0) + 113 at main.c:42
frame #27: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61565 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:01 +03:00
VALUE n = iseq - > body - > location . first_lineno ;
2016-07-26 13:07:12 +03:00
snprintf ( buff , buff_size , " %s %s@%s:%d " , buff ,
RSTRING_PTR ( iseq - > body - > location . label ) ,
2017-06-12 07:35:52 +03:00
RSTRING_PTR ( path ) ,
fix SEGV touching uninitialized memory
This function can be called from rb_data_typed_object_zalloc().
No assumption can be made about object internals.
(lldb) run
Process 22135 launched: './miniruby' (x86_64)
Process 22135 stopped
* thread #1: tid = 0x14a3af, 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x18)
frame #0: 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364
1361 break;
1362 }
1363 #endif
-> 1364 return block->type;
1365 }
1366
1367 static inline void
(lldb) bt
* thread #1: tid = 0x14a3af, 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x18)
* frame #0: 0x000000010008ac8a miniruby`vm_block_type(block=0x0000000000000000) + 12 at vm_core.h:1364
frame #1: 0x000000010008acdb miniruby`vm_block_iseq(block=0x0000000000000000) + 24 at vm_core.h:1399
frame #2: 0x000000010008acc1 miniruby`vm_proc_iseq(procval=4310866360) + 32 at vm_core.h:1387
frame #3: 0x000000010009cbed miniruby`rb_raw_obj_info(buff="0x0000000100f299b8 [0 ] proc (Proc)", buff_size=256, obj=4310866360) + 1513 at gc.c:9349
frame #4: 0x000000010009cf01 miniruby`obj_info(obj=4310866360) + 98 at gc.c:9428
frame #5: 0x000000010008ca1b miniruby`newobj_init(klass=4311027960, flags=12, v1=4298186080, v2=1, v3=0, wb_protected=32, objspace=0x00000001007cf280, obj=4310866360) + 338 at gc.c:1887
frame #6: 0x000000010008cce5 miniruby`newobj_of(klass=4311027960, flags=12, v1=4298186080, v2=1, v3=0, wb_protected=32) + 171 at gc.c:1970
frame #7: 0x000000010008d01d miniruby`rb_data_typed_object_wrap(klass=4311027960, datap=0x0000000000000000, type=0x0000000100311d60) + 133 at gc.c:2062
frame #8: 0x000000010008d04e miniruby`rb_data_typed_object_zalloc(klass=4311027960, size=40, type=0x0000000100311d60) + 42 at gc.c:2073
frame #9: 0x000000010011b459 miniruby`rb_proc_alloc(klass=4311027960) + 36 at proc.c:113
frame #10: 0x0000000100204d8e miniruby`vm_proc_create_from_captured(klass=4311027960, captured=0x00000001025003f8, block_type=block_type_iseq, is_from_method='\0', is_lambda='\x01') + 44 at vm.c:814
frame #11: 0x00000001002050d8 miniruby`rb_vm_make_proc_lambda(ec=0x00000001007cf548, captured=0x00000001025003f8, klass=4311027960, is_lambda='\x01') + 134 at vm.c:892
frame #12: 0x000000010011c0d2 miniruby`proc_new(klass=4311027960, is_lambda='\x01') + 445 at proc.c:752
frame #13: 0x000000010011c154 miniruby`rb_block_lambda + 27 at proc.c:808
frame #14: 0x00000001001ee7e3 miniruby`call_cfunc_0(func=(miniruby`rb_block_lambda at proc.c:807), recv=4310991600, argc=0, argv=0x0000000102400480) + 41 at vm_insnhelper.c:1729
frame #15: 0x00000001001ef2c3 miniruby`vm_call_cfunc_with_frame(ec=0x00000001007cf548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 386 at vm_insnhelper.c:1918
frame #16: 0x00000001001ef412 miniruby`vm_call_cfunc(ec=0x00000001007cf548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 149 at vm_insnhelper.c:1934
frame #17: 0x00000001001f0655 miniruby`vm_call_method_each_type(ec=0x00000001007cf548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 239 at vm_insnhelper.c:2232
frame #18: 0x00000001001f0ce0 miniruby`vm_call_method(ec=0x00000001007cf548, cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 117 at vm_insnhelper.c:2355
frame #19: 0x00000001001f0eb6 miniruby`vm_call_general(ec=0x00000001007cf548, reg_cfp=0x00000001025003e0, calling=0x00007fff5fbfd4d0, ci=0x0000000102537be0, cc=0x000000010253e0f0) + 59 at vm_insnhelper.c:2398
frame #20: 0x00000001001f6e61 miniruby`vm_exec_core(ec=0x00000001007cf548, initial=0) + 7480 at insns.def:850
frame #21: 0x0000000100207995 miniruby`vm_exec(ec=0x00000001007cf548) + 230 at vm.c:1771
frame #22: 0x0000000100208647 miniruby`rb_iseq_eval_main(iseq=0x0000000100f29fd0) + 52 at vm.c:2019
frame #23: 0x000000010007b750 miniruby`ruby_exec_internal(n=0x0000000100f29fd0) + 297 at eval.c:246
frame #24: 0x000000010007b876 miniruby`ruby_exec_node(n=0x0000000100f29fd0) + 36 at eval.c:310
frame #25: 0x000000010007b849 miniruby`ruby_run_node(n=0x0000000100f29fd0) + 62 at eval.c:302
frame #26: 0x0000000100000c05 miniruby`main(argc=2, argv=0x00007fff5fbfdbf0) + 113 at main.c:42
frame #27: 0x00007fff88eda5ad libdyld.dylib`start + 1
(lldb)
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61565 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-01-02 09:42:01 +03:00
n ? FIX2INT ( n ) : 0 ) ;
2016-07-26 13:07:12 +03:00
}
}
2015-07-02 12:36:59 +03:00
/* Fill buff with a one-line debug description of obj and return buff.
 * Safe to call on half-initialized objects (it is invoked from newobj_init
 * when RGENGC_OBJ_INFO is enabled), hence the NULL/zero checks sprinkled
 * through the type-specific branches. */
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) {
        snprintf(buff, buff_size, "%s", obj_type_name(obj));

        if (FIXNUM_P(obj)) {
            snprintf(buff, buff_size, "%s %ld", buff, FIX2LONG(obj));
        }
        else if (SYMBOL_P(obj)) {
            snprintf(buff, buff_size, "%s %s", buff, rb_id2name(SYM2ID(obj)));
        }
    }
    else {
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
        const int type = BUILTIN_TYPE(obj);
#if USE_RGENGC
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        /* address, age, and GC bitmap flags (Long-lived/Marked/maRking/Unprotected) */
        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
                     (void *)obj, age,
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
                     C(RVALUE_MARK_BITMAP(obj), "M"),
                     C(RVALUE_MARKING_BITMAP(obj), "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     obj_type_name(obj));
        }
        else {
            /* fake object: not on the GC heap, so the bitmaps cannot be read */
            snprintf(buff, buff_size, "%p [%dXXXX] %s",
                     (void *)obj, age,
                     obj_type_name(obj));
        }
#else
        snprintf(buff, buff_size, "%p [%s] %s",
                 (void *)obj,
                 C(RVALUE_MARK_BITMAP(obj), "M"),
                 obj_type_name(obj));
#endif

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            snprintf(buff, buff_size, "%s (temporary internal)", buff);
        }
        else {
            /* only a cached class path is safe here: computing one may allocate */
            VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
            if (!NIL_P(class_path)) {
                snprintf(buff, buff_size, "%s (%s)", buff, RSTRING_PTR(class_path));
            }
        }

#if GC_DEBUG
        snprintf(buff, buff_size, "%s @%s:%d", buff, RANY(obj)->file, RANY(obj)->line);
#endif

        /* type-specific details */
        switch (type) {
          case T_NODE:
            UNEXPECTED_NODE(rb_raw_obj_info);
            break;
          case T_ARRAY:
            if (FL_TEST(obj, ELTS_SHARED)) {
                snprintf(buff, buff_size, "%s shared -> %s", buff,
                         rb_obj_info(RARRAY(obj)->as.heap.aux.shared));
            }
            else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
                snprintf(buff, buff_size, "%s [%s%s] len: %d (embed)", buff,
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         (int)RARRAY_LEN(obj));
            }
            else {
                snprintf(buff, buff_size, "%s [%s%s%s] len: %d, capa:%d ptr:%p", buff,
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         C(RARRAY_TRANSIENT_P(obj), "T"),
                         (int)RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR_TRANSIENT(obj));
            }
            break;
          case T_STRING: {
            snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(obj));
            break;
          }
          case T_HASH: {
            /* [A]r-table or [S]t-table, [T]ransient, then entry count */
            snprintf(buff, buff_size, "%s [%c%c] %d", buff,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
                     (int)RHASH_SIZE(obj));
            break;
          }
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(class_path));
                }
                break;
            }
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    snprintf(buff, buff_size, "%s src:%s", buff, RSTRING_PTR(class_path));
                }
                break;
            }
          case T_OBJECT:
            {
                uint32_t len = ROBJECT_NUMIV(obj);

                if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
                    snprintf(buff, buff_size, "%s (embed) len:%d", buff, len);
                }
                else {
                    VALUE *ptr = ROBJECT_IVPTR(obj);
                    snprintf(buff, buff_size, "%s len:%d ptr:%p", buff, len, (void *)ptr);
                }
            }
            break;
          case T_DATA: {
            const struct rb_block *block;
            const rb_iseq_t *iseq;
            /* an iseq-backed Proc is described by its iseq location */
            if (rb_obj_is_proc(obj) &&
                (block = vm_proc_block(obj)) != NULL &&
                (vm_block_type(block) == block_type_iseq) &&
                (iseq = vm_block_iseq(block)) != NULL) {
                rb_raw_iseq_info(buff, buff_size, iseq);
            }
            else {
                const char * const type_name = rb_objspace_data_type_name(obj);
                if (type_name) {
                    snprintf(buff, buff_size, "%s %s", buff, type_name);
                }
            }
            break;
          }
          case T_IMEMO: {
            /* initialized so a name is always printable even if the switch
             * below ever misses a newly added imemo type */
            const char *imemo_name = "\0";
            switch (imemo_type(obj)) {
#define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
                IMEMO_NAME(env);
                IMEMO_NAME(cref);
                IMEMO_NAME(svar);
                IMEMO_NAME(throw_data);
                IMEMO_NAME(ifunc);
                IMEMO_NAME(memo);
                IMEMO_NAME(ment);
                IMEMO_NAME(iseq);
                IMEMO_NAME(tmpbuf);
                IMEMO_NAME(ast);
                IMEMO_NAME(parser_strterm);
#undef IMEMO_NAME
              default: UNREACHABLE;
            }
            snprintf(buff, buff_size, "%s %s", buff, imemo_name);

            switch (imemo_type(obj)) {
              case imemo_ment: {
                const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
                /* me->def may still be NULL during method-entry allocation */
                if (me->def) {
                    snprintf(buff, buff_size, "%s (called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)", buff,
                             rb_id2name(me->called_id),
                             method_type_name(me->def->type),
                             me->def->alias_count,
                             obj_info(me->owner),
                             obj_info(me->defined_class));
                }
                else {
                    snprintf(buff, buff_size, "%s", rb_id2name(me->called_id));
                }
                break;
              }
              case imemo_iseq: {
                const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
                rb_raw_iseq_info(buff, buff_size, iseq);
                break;
              }
              default:
                break;
            }
          }
          default:
            break;
        }
#undef TF
#undef C
    }
    return buff;
}
2015-07-02 12:36:59 +03:00
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

/* Describe obj into one of a small ring of static buffers, so several
 * results can coexist in a single fprintf call. NOTE(review): not
 * thread-safe — presumably only used under the GVL; confirm. */
static const char *
obj_info(VALUE obj)
{
    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0; /* wrap the ring */
    }

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
#else
/* Cheap fallback: just the value-type name. */
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
#endif
2018-02-11 07:20:32 +03:00
MJIT_FUNC_EXPORTED const char *
2015-05-31 22:12:42 +03:00
rb_obj_info ( VALUE obj )
{
if ( ! rb_special_const_p ( obj ) ) {
return obj_info ( obj ) ;
}
else {
return obj_type_name ( obj ) ;
}
}
2015-10-06 19:22:54 +03:00
/* Dump a one-line description of obj to stderr (debugger helper).
 * Fix: pass sizeof(buff) instead of repeating the 0x100 literal, so the
 * size argument can never drift from the buffer declaration. */
void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, sizeof(buff), obj));
}
2013-08-19 16:00:51 +04:00
# if GC_DEBUG
2013-07-18 03:19:38 +04:00
2012-11-22 19:03:46 +04:00
void
rb_gcdebug_print_obj_condition ( VALUE obj )
{
rb_objspace_t * objspace = & rb_objspace ;
2014-05-18 15:02:43 +04:00
fprintf ( stderr , " created at: %s:%d \n " , RANY ( obj ) - > file , RANY ( obj ) - > line ) ;
2013-08-20 21:32:18 +04:00
2012-11-22 19:03:46 +04:00
if ( is_pointer_to_heap ( objspace , ( void * ) obj ) ) {
fprintf ( stderr , " pointer to heap?: true \n " ) ;
}
else {
fprintf ( stderr , " pointer to heap?: false \n " ) ;
return ;
}
2013-08-20 21:32:18 +04:00
2013-12-13 06:38:05 +04:00
fprintf ( stderr , " marked? : %s \n " , MARKED_IN_BITMAP ( GET_HEAP_MARK_BITS ( obj ) , obj ) ? " true " : " false " ) ;
2013-08-20 21:32:18 +04:00
# if USE_RGENGC
2014-09-08 08:11:00 +04:00
fprintf ( stderr , " age? : %d \n " , RVALUE_AGE ( obj ) ) ;
2013-12-13 06:38:05 +04:00
fprintf ( stderr , " old? : %s \n " , RVALUE_OLD_P ( obj ) ? " true " : " false " ) ;
2014-09-08 08:11:00 +04:00
fprintf ( stderr , " WB-protected?: %s \n " , RVALUE_WB_UNPROTECTED ( obj ) ? " false " : " true " ) ;
2014-09-28 19:09:40 +04:00
fprintf ( stderr , " remembered? : %s \n " , RVALUE_REMEMBERED ( obj ) ? " true " : " false " ) ;
2013-08-20 21:32:18 +04:00
# endif
2013-10-26 14:42:07 +04:00
if ( is_lazy_sweeping ( heap_eden ) ) {
2012-11-22 19:03:46 +04:00
fprintf ( stderr , " lazy sweeping?: true \n " ) ;
2013-08-20 21:32:18 +04:00
fprintf ( stderr , " swept?: %s \n " , is_swept_object ( objspace , obj ) ? " done " : " not yet " ) ;
2012-11-22 19:03:46 +04:00
}
else {
fprintf ( stderr , " lazy sweeping?: false \n " ) ;
}
}
static VALUE
2014-06-09 11:01:44 +04:00
gcdebug_sentinel ( VALUE obj , VALUE name )
2012-11-22 19:03:46 +04:00
{
fprintf ( stderr , " WARNING: object %s(%p) is inadvertently collected \n " , ( char * ) name , ( void * ) obj ) ;
return Qnil ;
}
/* Attach a finalizer that warns on stderr if obj is ever collected.
 * `name` must outlive obj (it is stored uncopied in the finalizer proc). */
void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}
2014-09-08 08:11:00 +04:00
2012-11-22 19:03:46 +04:00
# endif /* GC_DEBUG */
2015-05-27 05:08:29 +03:00
# if GC_DEBUG_STRESS_TO_CLASS
2018-02-23 05:18:52 +03:00
/*
* call - seq :
* GC . add_stress_to_class ( class [ , . . . ] )
*
* Raises NoMemoryError when allocating an instance of the given classes .
*
*/
2015-05-27 05:08:29 +03:00
static VALUE
rb_gcdebug_add_stress_to_class ( int argc , VALUE * argv , VALUE self )
{
rb_objspace_t * objspace = & rb_objspace ;
if ( ! stress_to_class ) {
stress_to_class = rb_ary_tmp_new ( argc ) ;
}
rb_ary_cat ( stress_to_class , argv , argc ) ;
return self ;
}
2018-02-23 05:18:52 +03:00
/*
* call - seq :
* GC . remove_stress_to_class ( class [ , . . . ] )
*
* No longer raises NoMemoryError when allocating an instance of the
* given classes .
*
*/
2015-05-27 05:08:29 +03:00
static VALUE
rb_gcdebug_remove_stress_to_class ( int argc , VALUE * argv , VALUE self )
{
rb_objspace_t * objspace = & rb_objspace ;
int i ;
if ( stress_to_class ) {
for ( i = 0 ; i < argc ; + + i ) {
rb_ary_delete_same ( stress_to_class , argv [ i ] ) ;
}
if ( RARRAY_LEN ( stress_to_class ) = = 0 ) {
stress_to_class = 0 ;
}
}
return Qnil ;
}
# endif
2012-08-05 14:39:37 +04:00
/*
 *  Document-module: ObjectSpace
 *
 *  The ObjectSpace module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  ObjectSpace also provides support for object finalizers, procs that will be
 *  called when a specific object is about to be destroyed by garbage
 *  collection.
 *
 *     require 'objspace'
 *
 *     a = "A"
 *     b = "B"
 *
 *     ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
 *
 *  _produces:_
 *
 *     Finalizer two on 537763470
 *     Finalizer one on 537763480
 */
/*
 *  Document-class: ObjectSpace::WeakMap
 *
 *  An ObjectSpace::WeakMap object holds references to
 *  any objects, but those objects can get garbage collected.
 *
 *  This class is mostly used internally by WeakRef, please use
 *  +lib/weakref.rb+ for the public interface.
 */
2011-07-08 04:18:39 +04:00
/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including time,
 *  length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *
 *    require 'rdoc/rdoc'
 *
 *    GC::Profiler.report
 *
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */
2010-05-28 15:13:42 +04:00
2003-12-22 09:20:14 +03:00
/*
 *  The GC module provides an interface to Ruby's mark and
 *  sweep garbage collection mechanism.
 *
 *  Some of the underlying methods are also available via the ObjectSpace
 *  module.
 *
 *  You may obtain information about the operation of the GC through
 *  GC::Profiler.
 */
1998-01-16 15:13:05 +03:00
void
* array.c: moved to ANSI function style from K&R function style.
(used protoize on windows, so still K&R remains on #ifdef part of
other platforms. And `foo _((boo))' stuff is still there)
[ruby-dev:26975]
* bignum.c, class.c, compar.c, dir.c, dln.c, dmyext.c, enum.c,
enumerator.c, error.c, eval.c, file.c, gc.c, hash.c, inits.c,
io.c, main.c, marshal.c, math.c, numeric.c, object.c, pack.c,
prec.c, process.c, random.c, range.c, re.c, regcomp.c, regenc.c,
regerror.c, regexec.c, regparse.c, regparse.h, ruby.c, signal.c,
sprintf.c, st.c, string.c, struct.c, time.c, util.h, variable.c,
version.c: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@9126 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2005-09-12 14:44:21 +04:00
Init_GC ( void )
1998-01-16 15:13:05 +03:00
{
2014-07-03 07:38:10 +04:00
# undef rb_intern
2013-06-18 16:59:02 +04:00
VALUE rb_mObjSpace ;
2008-08-11 13:36:57 +04:00
VALUE rb_mProfiler ;
2013-11-27 10:57:14 +04:00
VALUE gc_constants ;
1999-01-20 07:59:39 +03:00
rb_mGC = rb_define_module ( " GC " ) ;
2013-12-06 09:11:51 +04:00
rb_define_singleton_method ( rb_mGC , " start " , gc_start_internal , - 1 ) ;
2001-01-29 08:10:42 +03:00
rb_define_singleton_method ( rb_mGC , " enable " , rb_gc_enable , 0 ) ;
rb_define_singleton_method ( rb_mGC , " disable " , rb_gc_disable , 0 ) ;
2006-01-10 01:32:55 +03:00
rb_define_singleton_method ( rb_mGC , " stress " , gc_stress_get , 0 ) ;
2014-09-09 08:56:55 +04:00
rb_define_singleton_method ( rb_mGC , " stress= " , gc_stress_set_m , 1 ) ;
2008-04-27 10:28:08 +04:00
rb_define_singleton_method ( rb_mGC , " count " , gc_count , 0 ) ;
2010-10-27 23:02:24 +04:00
rb_define_singleton_method ( rb_mGC , " stat " , gc_stat , - 1 ) ;
2013-12-05 14:30:38 +04:00
rb_define_singleton_method ( rb_mGC , " latest_gc_info " , gc_latest_gc_info , - 1 ) ;
2013-12-06 09:11:51 +04:00
rb_define_method ( rb_mGC , " garbage_collect " , gc_start_internal , - 1 ) ;
1999-01-20 07:59:39 +03:00
2013-11-27 10:57:14 +04:00
gc_constants = rb_hash_new ( ) ;
rb_hash_aset ( gc_constants , ID2SYM ( rb_intern ( " RVALUE_SIZE " ) ) , SIZET2NUM ( sizeof ( RVALUE ) ) ) ;
2016-01-09 01:15:40 +03:00
rb_hash_aset ( gc_constants , ID2SYM ( rb_intern ( " HEAP_PAGE_OBJ_LIMIT " ) ) , SIZET2NUM ( HEAP_PAGE_OBJ_LIMIT ) ) ;
rb_hash_aset ( gc_constants , ID2SYM ( rb_intern ( " HEAP_PAGE_BITMAP_SIZE " ) ) , SIZET2NUM ( HEAP_PAGE_BITMAP_SIZE ) ) ;
rb_hash_aset ( gc_constants , ID2SYM ( rb_intern ( " HEAP_PAGE_BITMAP_PLANES " ) ) , SIZET2NUM ( HEAP_PAGE_BITMAP_PLANES ) ) ;
2013-11-27 10:57:14 +04:00
OBJ_FREEZE ( gc_constants ) ;
2018-02-23 05:18:52 +03:00
/* internal constants */
2013-11-27 10:57:14 +04:00
rb_define_const ( rb_mGC , " INTERNAL_CONSTANTS " , gc_constants ) ;
2008-08-11 13:36:57 +04:00
rb_mProfiler = rb_define_module_under ( rb_mGC , " Profiler " ) ;
rb_define_singleton_method ( rb_mProfiler , " enabled? " , gc_profile_enable_get , 0 ) ;
rb_define_singleton_method ( rb_mProfiler , " enable " , gc_profile_enable , 0 ) ;
2011-09-08 07:57:41 +04:00
rb_define_singleton_method ( rb_mProfiler , " raw_data " , gc_profile_record_get , 0 ) ;
2008-08-11 13:36:57 +04:00
rb_define_singleton_method ( rb_mProfiler , " disable " , gc_profile_disable , 0 ) ;
rb_define_singleton_method ( rb_mProfiler , " clear " , gc_profile_clear , 0 ) ;
rb_define_singleton_method ( rb_mProfiler , " result " , gc_profile_result , 0 ) ;
rb_define_singleton_method ( rb_mProfiler , " report " , gc_profile_report , - 1 ) ;
2010-03-04 07:51:43 +03:00
rb_define_singleton_method ( rb_mProfiler , " total_time " , gc_profile_total_time , 0 ) ;
2008-08-11 13:36:57 +04:00
2013-06-18 16:59:02 +04:00
rb_mObjSpace = rb_define_module ( " ObjectSpace " ) ;
rb_define_module_function ( rb_mObjSpace , " each_object " , os_each_obj , - 1 ) ;
2013-12-06 09:11:51 +04:00
rb_define_module_function ( rb_mObjSpace , " garbage_collect " , gc_start_internal , - 1 ) ;
2000-07-15 17:37:03 +04:00
2013-06-18 16:59:02 +04:00
rb_define_module_function ( rb_mObjSpace , " define_finalizer " , define_final , - 1 ) ;
rb_define_module_function ( rb_mObjSpace , " undefine_finalizer " , undefine_final , 1 ) ;
2000-07-15 17:37:03 +04:00
2013-06-18 16:59:02 +04:00
rb_define_module_function ( rb_mObjSpace , " _id2ref " , id2ref , 1 ) ;
1998-01-16 15:13:05 +03:00
2014-09-11 14:53:48 +04:00
rb_vm_register_special_exception ( ruby_error_nomemory , rb_eNoMemError , " failed to allocate memory " ) ;
2006-03-02 08:22:30 +03:00
2010-12-17 16:25:19 +03:00
rb_define_method ( rb_cBasicObject , " __id__ " , rb_obj_id , 0 ) ;
2010-12-22 08:38:41 +03:00
rb_define_method ( rb_mKernel , " object_id " , rb_obj_id , 0 ) ;
2007-11-03 18:09:10 +03:00
2013-06-18 16:59:02 +04:00
rb_define_module_function ( rb_mObjSpace , " count_objects " , count_objects , - 1 ) ;
2008-06-08 14:27:06 +04:00
2012-03-13 07:37:06 +04:00
{
2013-06-18 16:59:02 +04:00
VALUE rb_cWeakMap = rb_define_class_under ( rb_mObjSpace , " WeakMap " , rb_cObject ) ;
2012-03-13 07:37:06 +04:00
rb_define_alloc_func ( rb_cWeakMap , wmap_allocate ) ;
rb_define_method ( rb_cWeakMap , " []= " , wmap_aset , 2 ) ;
rb_define_method ( rb_cWeakMap , " [] " , wmap_aref , 1 ) ;
2013-10-18 10:59:12 +04:00
rb_define_method ( rb_cWeakMap , " include? " , wmap_has_key , 1 ) ;
rb_define_method ( rb_cWeakMap , " member? " , wmap_has_key , 1 ) ;
2013-10-19 03:50:13 +04:00
rb_define_method ( rb_cWeakMap , " key? " , wmap_has_key , 1 ) ;
2013-10-18 10:59:14 +04:00
rb_define_method ( rb_cWeakMap , " inspect " , wmap_inspect , 0 ) ;
rb_define_method ( rb_cWeakMap , " each " , wmap_each , 0 ) ;
rb_define_method ( rb_cWeakMap , " each_pair " , wmap_each , 0 ) ;
rb_define_method ( rb_cWeakMap , " each_key " , wmap_each_key , 0 ) ;
rb_define_method ( rb_cWeakMap , " each_value " , wmap_each_value , 0 ) ;
rb_define_method ( rb_cWeakMap , " keys " , wmap_keys , 0 ) ;
rb_define_method ( rb_cWeakMap , " values " , wmap_values , 0 ) ;
2013-12-09 11:13:40 +04:00
rb_define_method ( rb_cWeakMap , " size " , wmap_size , 0 ) ;
rb_define_method ( rb_cWeakMap , " length " , wmap_size , 0 ) ;
2012-03-13 07:37:06 +04:00
rb_define_private_method ( rb_cWeakMap , " finalize " , wmap_finalize , 1 ) ;
2013-10-18 10:59:14 +04:00
rb_include_module ( rb_cWeakMap , rb_mEnumerable ) ;
2012-03-13 07:37:06 +04:00
}
2013-12-16 08:12:48 +04:00
/* internal methods */
rb_define_singleton_method ( rb_mGC , " verify_internal_consistency " , gc_verify_internal_consistency , 0 ) ;
2018-10-31 00:53:56 +03:00
rb_define_singleton_method ( rb_mGC , " verify_transient_heap_internal_consistency " , gc_verify_transient_heap_internal_consistency , 0 ) ;
2013-12-05 08:54:20 +04:00
# if MALLOC_ALLOCATED_SIZE
2008-06-08 14:27:06 +04:00
rb_define_singleton_method ( rb_mGC , " malloc_allocated_size " , gc_malloc_allocated_size , 0 ) ;
rb_define_singleton_method ( rb_mGC , " malloc_allocations " , gc_malloc_allocations , 0 ) ;
# endif
2013-11-06 02:13:51 +04:00
2015-05-27 05:08:29 +03:00
# if GC_DEBUG_STRESS_TO_CLASS
rb_define_singleton_method ( rb_mGC , " add_stress_to_class " , rb_gcdebug_add_stress_to_class , - 1 ) ;
rb_define_singleton_method ( rb_mGC , " remove_stress_to_class " , rb_gcdebug_remove_stress_to_class , - 1 ) ;
# endif
2013-11-06 02:13:51 +04:00
{
VALUE opts ;
2018-02-23 05:18:52 +03:00
/* GC build options */
2013-11-06 02:13:51 +04:00
rb_define_const ( rb_mGC , " OPTS " , opts = rb_ary_new ( ) ) ;
2015-06-24 06:47:37 +03:00
# define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
2013-11-06 02:13:51 +04:00
OPT ( GC_DEBUG ) ;
OPT ( USE_RGENGC ) ;
OPT ( RGENGC_DEBUG ) ;
OPT ( RGENGC_CHECK_MODE ) ;
OPT ( RGENGC_PROFILE ) ;
2013-11-24 23:49:02 +04:00
OPT ( RGENGC_ESTIMATE_OLDMALLOC ) ;
2013-11-06 02:13:51 +04:00
OPT ( GC_PROFILE_MORE_DETAIL ) ;
OPT ( GC_ENABLE_LAZY_SWEEP ) ;
OPT ( CALC_EXACT_MALLOC_SIZE ) ;
2013-12-05 08:54:20 +04:00
OPT ( MALLOC_ALLOCATED_SIZE ) ;
OPT ( MALLOC_ALLOCATED_SIZE_CHECK ) ;
2013-11-09 03:59:20 +04:00
OPT ( GC_PROFILE_DETAIL_MEMORY ) ;
2013-11-06 02:13:51 +04:00
# undef OPT
2014-06-30 18:59:46 +04:00
OBJ_FREEZE ( opts ) ;
2013-11-06 02:13:51 +04:00
}
1998-01-16 15:13:05 +03:00
}
2018-06-20 10:53:29 +03:00
# ifdef ruby_xmalloc
# undef ruby_xmalloc
# endif
# ifdef ruby_xmalloc2
# undef ruby_xmalloc2
# endif
# ifdef ruby_xcalloc
# undef ruby_xcalloc
# endif
# ifdef ruby_xrealloc
# undef ruby_xrealloc
# endif
# ifdef ruby_xrealloc2
# undef ruby_xrealloc2
# endif
/* Out-of-line ruby_xmalloc entry point.  When USE_GC_MALLOC_OBJ_INFO_DETAILS
 * is on, records this file/line as the allocation origin before delegating
 * to ruby_xmalloc_body().
 * NOTE(review): the recorded location is gc.c itself, not the caller —
 * presumably a fallback for calls that bypass the capturing macros; confirm. */
void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}
/* Out-of-line ruby_xmalloc2 (n * size) entry point; records this call site
 * for malloc-info bookkeeping, then delegates to ruby_xmalloc2_body(). */
void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}
/* Out-of-line ruby_xcalloc entry point (zeroed n * size allocation);
 * records this call site for malloc-info bookkeeping, then delegates to
 * ruby_xcalloc_body(). */
void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}
/* Out-of-line ruby_xrealloc entry point; records this call site for
 * malloc-info bookkeeping, then delegates to ruby_xrealloc_body(). */
void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}
/* Out-of-line ruby_xrealloc2 (n * new_size) entry point; records this call
 * site for malloc-info bookkeeping, then delegates to ruby_xrealloc2_body(). */
void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
}