Add rb_gc_mark_and_move and implement on iseq

This commit adds rb_gc_mark_and_move, which takes a pointer to an object
and marks it during the marking phase and updates references during compaction.
This allows marking and reference updating to be combined into a
single function, which reduces code duplication and prevents bugs if
marking and reference updating go out of sync.

This commit also implements rb_gc_mark_and_move on iseq as an example.
This commit is contained in:
Peter Zhu 2023-01-17 11:21:21 -05:00
Parent 6f3aff3961
Commit 41bf2354e3
3 changed files: 108 additions and 168 deletions

29
gc.c
View file

@ -746,6 +746,7 @@ typedef struct rb_objspace {
unsigned int dont_incremental : 1; unsigned int dont_incremental : 1;
unsigned int during_gc : 1; unsigned int during_gc : 1;
unsigned int during_compacting : 1; unsigned int during_compacting : 1;
unsigned int during_reference_updating : 1;
unsigned int gc_stressful: 1; unsigned int gc_stressful: 1;
unsigned int has_hook: 1; unsigned int has_hook: 1;
unsigned int during_minor_gc : 1; unsigned int during_minor_gc : 1;
@ -1210,8 +1211,7 @@ VALUE rb_mGC;
int ruby_disable_gc = 0; int ruby_disable_gc = 0;
int ruby_enable_autocompact = 0; int ruby_enable_autocompact = 0;
void rb_iseq_mark(const rb_iseq_t *iseq); void rb_iseq_mark_and_update(rb_iseq_t *iseq, bool referece_updating);
void rb_iseq_update_references(rb_iseq_t *iseq);
void rb_iseq_free(const rb_iseq_t *iseq); void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq); size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr); void rb_vm_update_references(void *ptr);
@ -7104,6 +7104,23 @@ rb_gc_mark(VALUE ptr)
gc_mark_and_pin(&rb_objspace, ptr); gc_mark_and_pin(&rb_objspace, ptr);
} }
/*
 * Mark the object referenced by *ptr during the marking phase, or rewrite
 * *ptr to the object's new address during the compaction reference-updating
 * phase. Combining both operations behind one entry point keeps the mark
 * and update-references code paths from drifting out of sync.
 */
void
rb_gc_mark_and_move(VALUE *ptr)
{
rb_objspace_t *objspace = &rb_objspace;
/* Special constants (Fixnum, Symbol, nil, ...) are immediates: never heap-allocated, so nothing to mark or move. */
if (RB_SPECIAL_CONST_P(*ptr)) return;
if (UNLIKELY(objspace->flags.during_reference_updating)) {
/* Reference updating can only happen while compacting, inside a GC cycle. */
GC_ASSERT(objspace->flags.during_compacting);
GC_ASSERT(during_gc);
/* Rewrite the slot to the object's post-compaction location. */
*ptr = rb_gc_location(*ptr);
}
else {
/* Marking phase: mark without pinning (unlike rb_gc_mark, which goes
 * through gc_mark_and_pin), so the object remains movable. */
gc_mark_ptr(objspace, *ptr);
}
}
/* CAUTION: THIS FUNCTION ENABLE *ONLY BEFORE* SWEEPING. /* CAUTION: THIS FUNCTION ENABLE *ONLY BEFORE* SWEEPING.
* This function is only for GC_END_MARK timing. * This function is only for GC_END_MARK timing.
*/ */
@ -7170,7 +7187,7 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
mark_method_entry(objspace, &RANY(obj)->as.imemo.ment); mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
return; return;
case imemo_iseq: case imemo_iseq:
rb_iseq_mark((rb_iseq_t *)obj); rb_iseq_mark_and_update((rb_iseq_t *)obj, false);
return; return;
case imemo_tmpbuf: case imemo_tmpbuf:
{ {
@ -10327,7 +10344,7 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment); gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
break; break;
case imemo_iseq: case imemo_iseq:
rb_iseq_update_references((rb_iseq_t *)obj); rb_iseq_mark_and_update((rb_iseq_t *)obj, true);
break; break;
case imemo_ast: case imemo_ast:
rb_ast_update_references((rb_ast_t *)obj); rb_ast_update_references((rb_ast_t *)obj);
@ -10763,6 +10780,8 @@ extern rb_symbols_t ruby_global_symbols;
static void static void
gc_update_references(rb_objspace_t *objspace) gc_update_references(rb_objspace_t *objspace)
{ {
objspace->flags.during_reference_updating = true;
rb_execution_context_t *ec = GET_EC(); rb_execution_context_t *ec = GET_EC();
rb_vm_t *vm = rb_ec_vm_ptr(ec); rb_vm_t *vm = rb_ec_vm_ptr(ec);
@ -10795,6 +10814,8 @@ gc_update_references(rb_objspace_t *objspace)
gc_update_table_refs(objspace, objspace->id_to_obj_tbl); gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
gc_update_table_refs(objspace, global_symbols.str_sym); gc_update_table_refs(objspace, global_symbols.str_sym);
gc_update_table_refs(objspace, finalizer_table); gc_update_table_refs(objspace, finalizer_table);
objspace->flags.during_reference_updating = false;
} }
#if GC_CAN_COMPILE_COMPACTION #if GC_CAN_COMPILE_COMPACTION

View file

@ -113,6 +113,14 @@ size_t rb_gc_obj_slot_size(VALUE obj);
bool rb_gc_size_allocatable_p(size_t size); bool rb_gc_size_allocatable_p(size_t size);
int rb_objspace_garbage_object_p(VALUE obj); int rb_objspace_garbage_object_p(VALUE obj);
void rb_gc_mark_and_move(VALUE *ptr);
/*
 * Like rb_gc_mark_and_move(), but for fields declared as typed struct
 * pointers (e.g. rb_iseq_t *). Copies the pointer into a VALUE temporary,
 * marks/moves it, and writes it back only if compaction relocated the
 * object; the (void *) cast lets the write-back convert to the field's
 * pointer type.
 */
#define rb_gc_mark_and_move_ptr(ptr) do { \
VALUE _obj = (VALUE)*(ptr); \
rb_gc_mark_and_move(&_obj); \
if (_obj != (VALUE)*(ptr)) *(ptr) = (void *)_obj; \
} while (0)
RUBY_SYMBOL_EXPORT_BEGIN RUBY_SYMBOL_EXPORT_BEGIN
/* gc.c (export) */ /* gc.c (export) */
const char *rb_objspace_data_type_name(VALUE obj); const char *rb_objspace_data_type_name(VALUE obj);

239
iseq.c
View file

@ -206,7 +206,7 @@ rb_iseq_free(const rb_iseq_t *iseq)
typedef VALUE iseq_value_itr_t(void *ctx, VALUE obj); typedef VALUE iseq_value_itr_t(void *ctx, VALUE obj);
static inline void static inline void
iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, iseq_value_itr_t *func, void *data) iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, VALUE *original_iseq)
{ {
unsigned int offset; unsigned int offset;
unsigned int page_offset = (page * ISEQ_MBITS_BITLENGTH); unsigned int page_offset = (page * ISEQ_MBITS_BITLENGTH);
@ -214,20 +214,17 @@ iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, iseq_value_itr_
while (bits) { while (bits) {
offset = ntz_intptr(bits); offset = ntz_intptr(bits);
VALUE op = code[page_offset + offset]; VALUE op = code[page_offset + offset];
VALUE newop = func(data, op); rb_gc_mark_and_move(&code[page_offset + offset]);
if (newop != op) { VALUE newop = code[page_offset + offset];
code[page_offset + offset] = newop; if (original_iseq && newop != op) {
if (data) { original_iseq[page_offset + offset] = newop;
VALUE *original_iseq = (VALUE *)data;
original_iseq[page_offset + offset] = newop;
}
} }
bits &= bits - 1; // Reset Lowest Set Bit (BLSR) bits &= bits - 1; // Reset Lowest Set Bit (BLSR)
} }
} }
static void static void
rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data) rb_iseq_mark_and_move_each_value(const rb_iseq_t *iseq, VALUE *original_iseq)
{ {
unsigned int size; unsigned int size;
VALUE *code; VALUE *code;
@ -248,10 +245,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
if (icvarc->entry) { if (icvarc->entry) {
RUBY_ASSERT(!RB_TYPE_P(icvarc->entry->class_value, T_NONE)); RUBY_ASSERT(!RB_TYPE_P(icvarc->entry->class_value, T_NONE));
VALUE nv = func(data, icvarc->entry->class_value); rb_gc_mark_and_move(&icvarc->entry->class_value);
if (icvarc->entry->class_value != nv) {
icvarc->entry->class_value = nv;
}
} }
} }
@ -259,10 +253,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
for (unsigned int i = 0; i < body->ise_size; i++, is_entries++) { for (unsigned int i = 0; i < body->ise_size; i++, is_entries++) {
union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)is_entries; union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)is_entries;
if (is->once.value) { if (is->once.value) {
VALUE nv = func(data, is->once.value); rb_gc_mark_and_move(&is->once.value);
if (is->once.value != nv) {
is->once.value = nv;
}
} }
} }
@ -270,10 +261,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
for (unsigned int i = 0; i < body->ic_size; i++, is_entries++) { for (unsigned int i = 0; i < body->ic_size; i++, is_entries++) {
IC ic = (IC)is_entries; IC ic = (IC)is_entries;
if (ic->entry) { if (ic->entry) {
VALUE nv = func(data, (VALUE)ic->entry); rb_gc_mark_and_move_ptr(&ic->entry);
if ((VALUE)ic->entry != nv) {
ic->entry = (void *)nv;
}
} }
} }
} }
@ -281,139 +269,55 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
// Embedded VALUEs // Embedded VALUEs
if (body->mark_bits.list) { if (body->mark_bits.list) {
if (ISEQ_MBITS_BUFLEN(size) == 1) { if (ISEQ_MBITS_BUFLEN(size) == 1) {
iseq_scan_bits(0, body->mark_bits.single, code, func, data); iseq_scan_bits(0, body->mark_bits.single, code, original_iseq);
} }
else { else {
if (body->mark_bits.list) { if (body->mark_bits.list) {
for (unsigned int i = 0; i < ISEQ_MBITS_BUFLEN(size); i++) { for (unsigned int i = 0; i < ISEQ_MBITS_BUFLEN(size); i++) {
iseq_bits_t bits = body->mark_bits.list[i]; iseq_bits_t bits = body->mark_bits.list[i];
iseq_scan_bits(i, bits, code, func, data); iseq_scan_bits(i, bits, code, original_iseq);
} }
} }
} }
} }
} }
/*
 * iseq_value_itr_t callback used during compaction: returns the object's
 * new address via rb_gc_location(); the ctx argument is unused here.
 */
static VALUE
update_each_insn_value(void *ctx, VALUE obj)
{
return rb_gc_location(obj);
}
void void
rb_iseq_update_references(rb_iseq_t *iseq) rb_iseq_mark_and_update(rb_iseq_t *iseq, bool reference_updating)
{
if (ISEQ_BODY(iseq)) {
struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
body->variable.coverage = rb_gc_location(body->variable.coverage);
body->variable.pc2branchindex = rb_gc_location(body->variable.pc2branchindex);
body->variable.script_lines = rb_gc_location(body->variable.script_lines);
body->location.label = rb_gc_location(body->location.label);
body->location.base_label = rb_gc_location(body->location.base_label);
body->location.pathobj = rb_gc_location(body->location.pathobj);
if (body->local_iseq) {
body->local_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->local_iseq);
}
if (body->parent_iseq) {
body->parent_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->parent_iseq);
}
if (body->mandatory_only_iseq) {
body->mandatory_only_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->mandatory_only_iseq);
}
if (body->call_data) {
for (unsigned int i=0; i<body->ci_size; i++) {
struct rb_call_data *cds = body->call_data;
if (cds[i].ci) {
cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
}
cds[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)cds[i].cc);
}
}
VALUE *original_iseq = ISEQ_ORIGINAL_ISEQ(iseq);
rb_iseq_each_value(iseq, update_each_insn_value, (void *)original_iseq);
if (body->param.flags.has_kw && ISEQ_COMPILE_DATA(iseq) == NULL) {
int i, j;
i = body->param.keyword->required_num;
for (j = 0; i < body->param.keyword->num; i++, j++) {
VALUE obj = body->param.keyword->default_values[j];
if (!UNDEF_P(obj)) {
body->param.keyword->default_values[j] = rb_gc_location(obj);
}
}
}
if (body->catch_table) {
struct iseq_catch_table *table = body->catch_table;
unsigned int i;
for (i = 0; i < table->size; i++) {
struct iseq_catch_table_entry *entry;
entry = UNALIGNED_MEMBER_PTR(table, entries[i]);
if (entry->iseq) {
entry->iseq = (rb_iseq_t *)rb_gc_location((VALUE)entry->iseq);
}
}
}
#if USE_MJIT
mjit_update_references(iseq);
#endif
#if USE_YJIT
rb_yjit_iseq_update_references(body->yjit_payload);
#endif
}
}
/*
 * iseq_value_itr_t callback used during marking: marks obj as movable
 * (not pinned) and returns it unchanged, so no write-back occurs.
 */
static VALUE
each_insn_value(void *ctx, VALUE obj)
{
rb_gc_mark_movable(obj);
return obj;
}
void
rb_iseq_mark(const rb_iseq_t *iseq)
{ {
RUBY_MARK_ENTER("iseq"); RUBY_MARK_ENTER("iseq");
RUBY_MARK_UNLESS_NULL(iseq->wrapper); rb_gc_mark_and_move(&iseq->wrapper);
if (ISEQ_BODY(iseq)) { if (ISEQ_BODY(iseq)) {
const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq); struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
rb_iseq_each_value(iseq, each_insn_value, NULL); rb_iseq_mark_and_move_each_value(iseq, reference_updating ? ISEQ_ORIGINAL_ISEQ(iseq) : NULL);
rb_gc_mark_movable(body->variable.coverage); rb_gc_mark_and_move(&body->variable.coverage);
rb_gc_mark_movable(body->variable.pc2branchindex); rb_gc_mark_and_move(&body->variable.pc2branchindex);
rb_gc_mark_movable(body->variable.script_lines); rb_gc_mark_and_move(&body->variable.script_lines);
rb_gc_mark_movable(body->location.label); rb_gc_mark_and_move(&body->location.label);
rb_gc_mark_movable(body->location.base_label); rb_gc_mark_and_move(&body->location.base_label);
rb_gc_mark_movable(body->location.pathobj); rb_gc_mark_and_move(&body->location.pathobj);
RUBY_MARK_MOVABLE_UNLESS_NULL((VALUE)body->mandatory_only_iseq); if (body->local_iseq) rb_gc_mark_and_move_ptr(&body->local_iseq);
RUBY_MARK_MOVABLE_UNLESS_NULL((VALUE)body->parent_iseq); if (body->parent_iseq) rb_gc_mark_and_move_ptr(&body->parent_iseq);
if (body->mandatory_only_iseq) rb_gc_mark_and_move_ptr(&body->mandatory_only_iseq);
if (body->call_data) { if (body->call_data) {
struct rb_call_data *cds = (struct rb_call_data *)body->call_data; for (unsigned int i = 0; i < body->ci_size; i++) {
for (unsigned int i=0; i<body->ci_size; i++) { struct rb_call_data *cds = body->call_data;
const struct rb_callinfo *ci = cds[i].ci;
const struct rb_callcache *cc = cds[i].cc;
if (ci) { if (cds[i].ci) rb_gc_mark_and_move_ptr(&cds[i].ci);
rb_gc_mark_movable((VALUE)ci);
}
if (cc) { if (cds[i].cc && (reference_updating || vm_cc_markable(cds[i].cc))) {
VM_ASSERT((cc->flags & VM_CALLCACHE_ON_STACK) == 0); VM_ASSERT(reference_updating || (cds[i].cc->flags & VM_CALLCACHE_ON_STACK) == 0);
if (vm_cc_markable(cc)) { if (reference_updating || !vm_cc_invalidated_p(cds[i].cc)) {
if (!vm_cc_invalidated_p(cc)) { rb_gc_mark_and_move_ptr(&cds[i].cc);
rb_gc_mark_movable((VALUE)cc); }
} else {
else { cds[i].cc = rb_vm_empty_cc();
cds[i].cc = rb_vm_empty_cc();
}
} }
} }
} }
@ -421,57 +325,64 @@ rb_iseq_mark(const rb_iseq_t *iseq)
if (body->param.flags.has_kw && ISEQ_COMPILE_DATA(iseq) == NULL) { if (body->param.flags.has_kw && ISEQ_COMPILE_DATA(iseq) == NULL) {
const struct rb_iseq_param_keyword *const keyword = body->param.keyword; const struct rb_iseq_param_keyword *const keyword = body->param.keyword;
int i, j;
i = keyword->required_num; for (int j = 0, i = keyword->required_num; i < keyword->num; i++, j++) {
rb_gc_mark_and_move(&keyword->default_values[j]);
for (j = 0; i < keyword->num; i++, j++) {
VALUE obj = keyword->default_values[j];
if (!SPECIAL_CONST_P(obj)) {
rb_gc_mark_movable(obj);
}
} }
} }
if (body->catch_table) { if (body->catch_table) {
const struct iseq_catch_table *table = body->catch_table; struct iseq_catch_table *table = body->catch_table;
unsigned int i;
for (i = 0; i < table->size; i++) { for (unsigned int i = 0; i < table->size; i++) {
const struct iseq_catch_table_entry *entry; struct iseq_catch_table_entry *entry;
entry = UNALIGNED_MEMBER_PTR(table, entries[i]); entry = UNALIGNED_MEMBER_PTR(table, entries[i]);
if (entry->iseq) { if (entry->iseq) {
rb_gc_mark_movable((VALUE)entry->iseq); rb_gc_mark_and_move_ptr(&entry->iseq);
} }
} }
} }
if (reference_updating) {
#if USE_MJIT #if USE_MJIT
mjit_mark_cc_entries(body); mjit_update_references(iseq);
#endif #endif
#if USE_YJIT #if USE_YJIT
rb_yjit_iseq_mark(body->yjit_payload); rb_yjit_iseq_update_references(body->yjit_payload);
#endif
}
else {
#if USE_MJIT
mjit_mark_cc_entries(body);
#endif
#if USE_YJIT
rb_yjit_iseq_mark(body->yjit_payload);
#endif #endif
}
if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
rb_gc_mark(iseq->aux.loader.obj);
}
else if (FL_TEST_RAW((VALUE)iseq, ISEQ_USE_COMPILE_DATA)) {
const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
rb_iseq_mark_insn_storage(compile_data->insn.storage_head);
RUBY_MARK_UNLESS_NULL(compile_data->err_info);
if (RTEST(compile_data->catch_table_ary)) {
rb_gc_mark(compile_data->catch_table_ary);
} }
VM_ASSERT(compile_data != NULL);
} }
else {
/* executable */ // TODO: make these not pinned
VM_ASSERT(ISEQ_EXECUTABLE_P(iseq)); if (!reference_updating) {
if (iseq->aux.exec.local_hooks) { if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
rb_hook_list_mark(iseq->aux.exec.local_hooks); rb_gc_mark(iseq->aux.loader.obj);
}
else if (FL_TEST_RAW((VALUE)iseq, ISEQ_USE_COMPILE_DATA)) {
const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
rb_iseq_mark_insn_storage(compile_data->insn.storage_head);
RUBY_MARK_UNLESS_NULL(compile_data->err_info);
if (RTEST(compile_data->catch_table_ary)) {
rb_gc_mark(compile_data->catch_table_ary);
}
VM_ASSERT(compile_data != NULL);
}
else {
/* executable */
VM_ASSERT(ISEQ_EXECUTABLE_P(iseq));
if (iseq->aux.exec.local_hooks) {
rb_hook_list_mark(iseq->aux.exec.local_hooks);
}
} }
} }