Mirror of https://github.com/github/ruby.git
[Feature #20470] Split GC into gc_impl.c
This commit splits gc.c into two files:

- gc.c now only contains code not specific to Ruby's GC. This includes code to mark objects (which the GC implementation may choose not to use) and wrappers for internal APIs that the implementation may need to use (e.g. locking the VM).
- gc_impl.c now contains the implementation of Ruby's GC. This includes marking, sweeping, compaction, and statistics. Most importantly, gc_impl.c only uses public APIs in Ruby and a limited set of functions exposed in gc.c. This allows us to build gc_impl.c independently of Ruby and plug Ruby's GC into itself.

A short sketch of this boundary follows the commit metadata below.
This commit is contained in:
Parent: 9aa62bda46
Commit: 51bd816517
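The resulting boundary can be sketched in a few lines of C. The opaque-pointer declarations are copied from the vm_core.h hunk in this diff, and rb_gc_get_objspace plus the rb_gc_impl_* call appear in the gc.rb hunks; the signature of rb_gc_impl_set_measure_total_time and the forwarding wrapper at the end are illustrative assumptions, not code from the commit.

#include <ruby.h>

/* vm_core.h (from this diff): the object space becomes an opaque handle. */
void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);

/* gc.c: the GC-agnostic layer owns the handle but never inspects it. */
void *rb_gc_get_objspace(void);

/* gc_impl.c: the GC implementation sees Ruby only through public APIs plus
 * a small set of functions exported from gc.c. Signature assumed from the
 * call site in gc.rb. */
VALUE rb_gc_impl_set_measure_total_time(void *objspace, VALUE flag);

/* Hypothetical gc.c-side wrapper: forwards without knowing the objspace
 * layout, which is private to gc_impl.c. */
static VALUE
gc_measure_total_time_set(VALUE flag)
{
    return rb_gc_impl_set_measure_total_time(rb_gc_get_objspace(), flag);
}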
common.mk
@@ -130,6 +130,7 @@ COMMONOBJS    = array.$(OBJEXT) \
 		eval.$(OBJEXT) \
 		file.$(OBJEXT) \
 		gc.$(OBJEXT) \
+		gc_impl.$(OBJEXT) \
 		hash.$(OBJEXT) \
 		inits.$(OBJEXT) \
 		imemo.$(OBJEXT) \
eval.c (2 changes)
@@ -161,7 +161,7 @@ rb_ec_finalize(rb_execution_context_t *ec)
 {
     ruby_sig_finalize();
     ec->errinfo = Qnil;
-    rb_objspace_call_finalizer(rb_ec_vm_ptr(ec)->objspace);
+    rb_objspace_call_finalizer();
 }
 
 void
ext/objspace/objspace.c
@@ -577,7 +577,7 @@ reachable_object_from_i(VALUE obj, void *data_ptr)
     VALUE key = obj;
     VALUE val = obj;
 
-    if (rb_objspace_markable_object_p(obj)) {
+    if (!rb_objspace_garbage_object_p(obj)) {
         if (NIL_P(rb_hash_lookup(data->refs, key))) {
             rb_hash_aset(data->refs, key, Qtrue);
@@ -643,7 +643,7 @@ collect_values(st_data_t key, st_data_t value, st_data_t data)
 static VALUE
 reachable_objects_from(VALUE self, VALUE obj)
 {
-    if (rb_objspace_markable_object_p(obj)) {
+    if (!RB_SPECIAL_CONST_P(obj)) {
         struct rof_data data;
 
         if (rb_typeddata_is_kind_of(obj, &iow_data_type)) {
@@ -690,7 +690,7 @@ reachable_object_from_root_i(const char *category, VALUE obj, void *ptr)
         rb_hash_aset(data->categories, category_str, category_objects);
     }
 
-    if (rb_objspace_markable_object_p(obj) &&
+    if (!rb_objspace_garbage_object_p(obj) &&
         obj != data->categories &&
         obj != data->last_category_objects) {
         if (rb_objspace_internal_object_p(obj)) {
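A pattern recurs across these hunks: the removed predicate rb_objspace_markable_object_p is replaced everywhere by a negated rb_objspace_garbage_object_p, or by !RB_SPECIAL_CONST_P where only the special-constant half of the check mattered. A sketch of the assumed relationship, reconstructed from the rewritten call sites rather than copied from gc.c:

#include <ruby.h>

int rb_objspace_garbage_object_p(VALUE obj);  /* declared in the internal/gc.h hunk below */

/* Assumed meaning of the removed predicate: an object was "markable" when
 * it was a real heap object (not a special constant) and not garbage.
 * Call sites that already know obj is a heap object keep only the garbage
 * half; reachable_objects_from keeps only the special-constant half. */
static int
markable_object_p_sketch(VALUE obj)
{
    return !RB_SPECIAL_CONST_P(obj) && !rb_objspace_garbage_object_p(obj);
}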
gc.c (11882 changes)
Diff not shown because of its size.
gc.rb (27 changes)
@@ -267,26 +267,6 @@ module GC
     Primitive.gc_latest_gc_info hash_or_key
   end
 
-  if respond_to?(:compact)
-    # call-seq:
-    #    GC.verify_compaction_references(toward: nil, double_heap: false) -> hash
-    #
-    # Verify compaction reference consistency.
-    #
-    # This method is implementation specific. During compaction, objects that
-    # were moved are replaced with T_MOVED objects. No object should have a
-    # reference to a T_MOVED object after compaction.
-    #
-    # This function expands the heap to ensure room to move all objects,
-    # compacts the heap to make sure everything moves, updates all references,
-    # then performs a full \GC. If any object contains a reference to a T_MOVED
-    # object, that object should be pushed on the mark stack, and will
-    # make a SEGV.
-    def self.verify_compaction_references(toward: nil, double_heap: false, expand_heap: false)
-      Primitive.gc_verify_compaction_references(double_heap, expand_heap, toward == :empty)
-    end
-  end
-
   # call-seq:
   #    GC.measure_total_time = true/false
   #
@@ -295,8 +275,7 @@ module GC
   # Note that \GC time measurement can cause some performance overhead.
   def self.measure_total_time=(flag)
     Primitive.cstmt! %{
-      rb_objspace.flags.measure_gc = RTEST(flag) ? TRUE : FALSE;
-      return flag;
+      return rb_gc_impl_set_measure_total_time(rb_gc_get_objspace(), flag);
     }
   end
 
@@ -307,7 +286,7 @@ module GC
   # Note that measurement can affect the application performance.
   def self.measure_total_time
     Primitive.cexpr! %{
-      RBOOL(rb_objspace.flags.measure_gc)
+      rb_gc_impl_get_measure_total_time(rb_gc_get_objspace())
     }
   end
 
@@ -317,7 +296,7 @@ module GC
   # Return measured \GC total time in nano seconds.
   def self.total_time
     Primitive.cexpr! %{
-      ULL2NUM(rb_objspace.profile.marking_time_ns + rb_objspace.profile.sweeping_time_ns)
+      rb_gc_impl_get_profile_total_time(rb_gc_get_objspace())
     }
   end
 end
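The methods above are builtins whose bodies are C fragments: Primitive.cstmt! embeds a C statement block and Primitive.cexpr! a single C expression, and the build turns each into an ordinary C function. A hedged sketch of what GC.total_time plausibly lowers to after this change; the wrapper name and parameter list are assumptions, while the returned expression is verbatim from the hunk above:

/* Illustrative only: the generated wrapper's name and signature are
 * assumptions about the builtin plumbing; the expression is taken from
 * the cexpr! body above. */
static VALUE
gc_total_time(rb_execution_context_t *ec, VALUE self)
{
    return rb_gc_impl_get_profile_total_time(rb_gc_get_objspace());
}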
gc_impl.c
Diff not shown because of its size.
imemo.c (4 changes)
@@ -214,7 +214,7 @@ rb_cc_table_mark(VALUE klass)
 static bool
 moved_or_living_object_strictly_p(VALUE obj)
 {
-    return obj && (rb_objspace_markable_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
+    return obj && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
 }
 
 static void
@@ -457,7 +457,7 @@ vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, VALUE klass)
     if (!alive) {
         void *ptr = asan_unpoison_object_temporary((VALUE)cc);
         // ccs can be free'ed.
-        if (rb_objspace_markable_object_p((VALUE)cc) &&
+        if (!rb_objspace_garbage_object_p((VALUE)cc) &&
             IMEMO_TYPE_P(cc, imemo_callcache) &&
             cc->klass == klass) {
             // OK. maybe target cc.
internal/gc.h
@@ -163,12 +163,6 @@ struct rb_objspace; /* in vm_core.h */
         RB_OBJ_WRITE(old, _slot, young); \
     } while (0)
 
-// We use SIZE_POOL_COUNT number of shape IDs for transitions out of different size pools
-// The next available shape ID will be the SPECIAL_CONST_SHAPE_ID
-#ifndef SIZE_POOL_COUNT
-# define SIZE_POOL_COUNT 5
-#endif
-
 /* Used in places that could malloc during, which can cause the GC to run. We
  * need to temporarily disable the GC to allow the malloc to happen.
  * Allocating memory during GC is a bad idea, so use this only when absolutely
@@ -180,16 +174,6 @@ struct rb_objspace; /* in vm_core.h */
 #define DURING_GC_COULD_MALLOC_REGION_END() \
     if (_already_disabled == Qfalse) rb_gc_enable()
 
-typedef struct ractor_newobj_size_pool_cache {
-    struct RVALUE *freelist;
-    struct heap_page *using_page;
-} rb_ractor_newobj_size_pool_cache_t;
-
-typedef struct ractor_newobj_cache {
-    size_t incremental_mark_step_allocated_slots;
-    rb_ractor_newobj_size_pool_cache_t size_pool_caches[SIZE_POOL_COUNT];
-} rb_ractor_newobj_cache_t;
-
 /* gc.c */
 extern int ruby_disable_gc;
 RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
@@ -197,8 +181,8 @@ RUBY_ATTR_MALLOC void *ruby_mimcalloc(size_t num, size_t size);
 void ruby_mimfree(void *ptr);
 void rb_gc_prepare_heap(void);
 void rb_objspace_set_event_hook(const rb_event_flag_t event);
-VALUE rb_objspace_gc_enable(struct rb_objspace *);
-VALUE rb_objspace_gc_disable(struct rb_objspace *);
+VALUE rb_objspace_gc_enable(void *objspace);
+VALUE rb_objspace_gc_disable(void *objspace);
 void ruby_gc_set_params(void);
 void rb_gc_copy_attributes(VALUE dest, VALUE obj);
 size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
@@ -212,12 +196,13 @@ RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
 static inline void *ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
 static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
 static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
-void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
+
+void *rb_gc_ractor_cache_alloc(void);
+void rb_gc_ractor_cache_free(void *cache);
+
 bool rb_gc_size_allocatable_p(size_t size);
 size_t *rb_gc_size_pool_sizes(void);
 size_t rb_gc_size_pool_id_for_size(size_t size);
+int rb_objspace_garbage_object_p(VALUE obj);
 bool rb_gc_is_ptr_to_obj(const void *ptr);
 
 void rb_gc_mark_and_move(VALUE *ptr);
@@ -238,8 +223,8 @@ RUBY_SYMBOL_EXPORT_BEGIN
 /* exports for objspace module */
 void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
 void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
-int rb_objspace_markable_object_p(VALUE obj);
 int rb_objspace_internal_object_p(VALUE obj);
-int rb_objspace_garbage_object_p(VALUE obj);
 
 void rb_objspace_each_objects(
     int (*callback)(void *start, void *end, size_t stride, void *data),
@@ -255,7 +240,6 @@ const char *rb_objspace_data_type_name(VALUE obj);
 VALUE rb_wb_protected_newobj_of(struct rb_execution_context_struct *, VALUE, VALUE, size_t);
 VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, size_t);
 size_t rb_obj_memsize_of(VALUE);
-void rb_gc_verify_internal_consistency(void);
 size_t rb_obj_gc_flags(VALUE, ID[], size_t);
 void rb_gc_mark_values(long n, const VALUE *values);
 void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
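With rb_ractor_newobj_cache_t removed from internal/gc.h, the allocation-cache layout becomes private to the GC implementation and callers keep only a void *. A minimal sketch of how gc_impl.c could back the new pair; the struct contents and the calloc/free strategy are assumptions, and only the two function names and signatures come from the hunk above:

#include <stdlib.h>

/* gc_impl.c (sketch): cache layout known only to the GC implementation. */
struct newobj_cache {
    size_t incremental_mark_step_allocated_slots;  /* field name from the removed header */
    /* per-size-pool freelists and pages would live here (assumed) */
};

void *
rb_gc_ractor_cache_alloc(void)
{
    return calloc(1, sizeof(struct newobj_cache));
}

void
rb_gc_ractor_cache_free(void *cache)
{
    free(cache);
}

ractor.c, in the hunks below, then stores this handle in the void *newobj_cache field without ever depending on its layout.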
ractor.c (21 changes)
@@ -248,6 +248,9 @@ ractor_free(void *ptr)
     ractor_queue_free(&r->sync.takers_queue);
     ractor_local_storage_free(r);
     rb_hook_list_free(&r->pub.hooks);
+
+    RUBY_ASSERT(rb_free_at_exit || r->newobj_cache == NULL);
+
     ruby_xfree(r);
 }
 
@@ -1923,6 +1926,13 @@ vm_insert_ractor0(rb_vm_t *vm, rb_ractor_t *r, bool single_ractor_mode)
 
     ccan_list_add_tail(&vm->ractor.set, &r->vmlr_node);
     vm->ractor.cnt++;
+
+    if (r->newobj_cache) {
+        VM_ASSERT(r == ruby_single_main_ractor);
+    }
+    else {
+        r->newobj_cache = rb_gc_ractor_cache_alloc();
+    }
 }
 
 static void
@@ -1990,8 +2000,8 @@ vm_remove_ractor(rb_vm_t *vm, rb_ractor_t *cr)
     }
     vm->ractor.cnt--;
 
-    /* Clear the cached freelist to prevent a memory leak. */
-    rb_gc_ractor_newobj_cache_clear(&cr->newobj_cache);
+    rb_gc_ractor_cache_free(cr->newobj_cache);
+    cr->newobj_cache = NULL;
 
     ractor_status_set(cr, ractor_terminated);
 }
@@ -2021,6 +2031,7 @@ rb_ractor_main_alloc(void)
     r->loc = Qnil;
     r->name = Qnil;
     r->pub.self = Qnil;
+    r->newobj_cache = rb_gc_ractor_cache_alloc();
     ruby_single_main_ractor = r;
 
     return r;
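Taken together, the hunks above encode the cache lifecycle: rb_ractor_main_alloc creates the main ractor's cache before the VM is set up, so vm_insert_ractor0 asserts that a pre-existing cache can only belong to ruby_single_main_ractor and allocates one for every other ractor; vm_remove_ractor releases it, and ractor_free checks that nothing leaked. Condensed view, assembled from the hunks in this diff rather than a verbatim excerpt:

/* Condensed lifecycle, assembled from the ractor.c hunks above. */
r->newobj_cache = rb_gc_ractor_cache_alloc();   /* rb_ractor_main_alloc or vm_insert_ractor0 */
/* ... the ractor allocates objects through its cache ... */
rb_gc_ractor_cache_free(cr->newobj_cache);      /* vm_remove_ractor */
cr->newobj_cache = NULL;
RUBY_ASSERT(rb_free_at_exit || r->newobj_cache == NULL);  /* ractor_free */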
@@ -3114,6 +3125,12 @@ rb_ractor_shareable_p_continue(VALUE obj)
 }
 
 #if RACTOR_CHECK_MODE > 0
+void
+rb_ractor_setup_belonging(VALUE obj)
+{
+    rb_ractor_setup_belonging_to(obj, rb_ractor_current_id());
+}
+
 static enum obj_traverse_iterator_result
 reset_belonging_enter(VALUE obj)
 {
ractor_core.h
@@ -187,7 +187,7 @@ struct rb_ractor_struct {
     VALUE verbose;
     VALUE debug;
 
-    rb_ractor_newobj_cache_t newobj_cache;
+    void *newobj_cache;
 
     // gc.c rb_objspace_reachable_objects_from
     struct gc_mark_func_data_struct {
@@ -227,12 +227,13 @@ void rb_ractor_vm_barrier_interrupt_running_thread(rb_ractor_t *r);
 void rb_ractor_terminate_interrupt_main_thread(rb_ractor_t *r);
 void rb_ractor_terminate_all(void);
 bool rb_ractor_main_p_(void);
-void rb_ractor_finish_marking(void);
 void rb_ractor_atfork(rb_vm_t *vm, rb_thread_t *th);
 
 VALUE rb_ractor_ensure_shareable(VALUE obj, VALUE name);
 
 RUBY_SYMBOL_EXPORT_BEGIN
+void rb_ractor_finish_marking(void);
+
 bool rb_ractor_shareable_p_continue(VALUE obj);
 
 // THIS FUNCTION SHOULD NOT CALL WHILE INCREMENTAL MARKING!!
@@ -349,12 +350,6 @@ rb_ractor_setup_belonging_to(VALUE obj, uint32_t rid)
     RACTOR_BELONGING_ID(obj) = rid;
 }
 
-static inline void
-rb_ractor_setup_belonging(VALUE obj)
-{
-    rb_ractor_setup_belonging_to(obj, rb_ractor_current_id());
-}
-
 static inline uint32_t
 rb_ractor_belonging(VALUE obj)
 {
test/objspace/test_objspace.rb
@@ -558,7 +558,7 @@ class TestObjSpace < Test::Unit::TestCase
       next if obj["type"] == "SHAPE"
 
       assert_not_nil obj["slot_size"]
-      assert_equal 0, obj["slot_size"] % GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]
+      assert_equal 0, obj["slot_size"] % (GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE] + GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD])
     }
   end
 end
test/ruby/test_gc.rb
@@ -159,7 +159,7 @@ class TestGc < Test::Unit::TestCase
         GC.enable if reenable_gc
       end
 
-      assert_equal GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] * (2**i), stat_heap[:slot_size]
+      assert_equal (GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE] + GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD]) * (2**i), stat_heap[:slot_size]
       assert_operator stat_heap[:heap_allocatable_pages], :<=, stat[:heap_allocatable_pages]
       assert_operator stat_heap[:heap_eden_pages], :<=, stat[:heap_eden_pages]
       assert_operator stat_heap[:heap_eden_slots], :>=, 0
@@ -681,7 +681,7 @@ class TestGc < Test::Unit::TestCase
 
   def test_gc_internals
     assert_not_nil GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT]
-    assert_not_nil GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]
+    assert_not_nil GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE]
   end
 
   def test_sweep_in_finalizer
vm_core.h
@@ -609,10 +609,9 @@ typedef struct rb_at_exit_list {
     struct rb_at_exit_list *next;
 } rb_at_exit_list;
 
-struct rb_objspace;
-struct rb_objspace *rb_objspace_alloc(void);
-void rb_objspace_free(struct rb_objspace *);
-void rb_objspace_call_finalizer(struct rb_objspace *);
+void *rb_objspace_alloc(void);
+void rb_objspace_free(void *objspace);
+void rb_objspace_call_finalizer(void);
 
 typedef struct rb_hook_list_struct {
     struct rb_event_hook_struct *hooks;
vm_insnhelper.c
@@ -440,7 +440,6 @@ rb_vm_pop_frame_no_int(rb_execution_context_t *ec)
 {
     rb_control_frame_t *cfp = ec->cfp;
 
-    if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
     if (VMDEBUG == 2) SDR();
 
     ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@@ -452,7 +451,6 @@ vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
 {
     VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
 
-    if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
     if (VMDEBUG == 2) SDR();
 
     RUBY_VM_CHECK_INTS(ec);
yjit.c (2 changes)
@@ -1065,7 +1065,7 @@ rb_IMEMO_TYPE_P(VALUE imemo, enum imemo_type imemo_type)
 void
 rb_assert_cme_handle(VALUE handle)
 {
-    RUBY_ASSERT_ALWAYS(rb_objspace_markable_object_p(handle));
+    RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(handle));
     RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_ment));
 }
 