Use an identity hash for pinning Ripper objects

Ripper reuses parse.y for its implementation.  Ripper changes the
grammar productions to sometimes return Ruby objects.  These Ruby objects
are put into the parser's stack, so they must be kept alive.  This is
where the "mark_ary" comes in.  The mark array ensures that Ruby objects
created and pushed on the stack during the course of parsing will stay
alive for the life of the parsing functions.

Unfortunately, Arrays do not prevent their contents from moving.  If the
compactor runs, objects on the parser stack could move because the array
won't prevent them from moving.  But the GC doesn't know about the
parser stack, so it can't update references in that stack (it will
update them in the array).

This commit changes the mark array to be an identity hash.  Since the
identity hash relies on memory addresses for the definition of identity,
the GC will not allow keys in an identity hash to move.  We can prevent
movement of objects in the parser stack by sticking them in an identity
hash.
This commit is contained in:
Aaron Patterson 2019-11-04 16:04:58 -08:00
Родитель d47b643428
Коммит 7460c884fb
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 953170BCB4FFAFC6
1 изменённых файлов: 6 добавлений и 6 удалений

12
node.c
Просмотреть файл

@@ -1129,7 +1129,7 @@ typedef struct {
 struct node_buffer_struct {
     node_buffer_list_t unmarkable;
     node_buffer_list_t markable;
-    VALUE mark_ary;
+    VALUE mark_hash;
 };
 
 static void
@@ -1154,7 +1154,7 @@ rb_node_buffer_new(void)
     node_buffer_t *nb = ruby_xmalloc(alloc_size);
     init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1]);
     init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size));
-    nb->mark_ary = Qnil;
+    nb->mark_hash = Qnil;
     return nb;
 }
@@ -1350,7 +1350,7 @@ rb_ast_update_references(rb_ast_t *ast)
 void
 rb_ast_mark(rb_ast_t *ast)
 {
-    if (ast->node_buffer) rb_gc_mark(ast->node_buffer->mark_ary);
+    if (ast->node_buffer) rb_gc_mark(ast->node_buffer->mark_hash);
     if (ast->body.compile_option) rb_gc_mark(ast->body.compile_option);
     if (ast->node_buffer) {
         node_buffer_t *nb = ast->node_buffer;
@@ -1403,8 +1403,8 @@ rb_ast_dispose(rb_ast_t *ast)
 void
 rb_ast_add_mark_object(rb_ast_t *ast, VALUE obj)
 {
-    if (NIL_P(ast->node_buffer->mark_ary)) {
-        RB_OBJ_WRITE(ast, &ast->node_buffer->mark_ary, rb_ary_tmp_new(0));
+    if (NIL_P(ast->node_buffer->mark_hash)) {
+        RB_OBJ_WRITE(ast, &ast->node_buffer->mark_hash, rb_ident_hash_new());
     }
-    rb_ary_push(ast->node_buffer->mark_ary, obj);
+    rb_hash_aset(ast->node_buffer->mark_hash, obj, Qtrue);
 }