Mirror of https://github.com/github/ruby.git
Use a linked list to eliminate imemo tmp bufs for managing local tables
This patch changes local table memory to be managed by a linked list rather than via the garbage collector. It reduces allocations from the GC and also fixes a use-after-free bug in the concurrent-with-sweep compactor I'm working on.
Parent: c5ae79d7e9
Commit: 35ba2783fe
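Before the diffs, it helps to see the core trick in isolation. The sketch below is a minimal, self-contained illustration of the layout the patch uses, with hypothetical names (push_local_table, free_local_tables, and a file-scope local_tables head); in the patch itself the head lives in the node buffer and the corresponding code is rb_ast_add_local_table and the free loop in rb_node_buffer_free. Each local table is a single ID array: slot 0 holds the entry count, slots 1..size hold the variable IDs, and slot size + 1 doubles as the "next" pointer, so the tables themselves form an intrusive linked list.

#include <stdint.h>
#include <stdlib.h>

typedef uintptr_t ID;                 /* stand-in for Ruby's pointer-sized ID */

static ID *local_tables = NULL;       /* list head; a node_buffer field in the patch */

/* Allocate a table shaped [size][id 1..size][next] and push it on the list. */
static ID *
push_local_table(unsigned int size)
{
    ID *buf = malloc((size + 2) * sizeof(ID));
    buf[0] = size;
    buf[size + 1] = (ID)local_tables; /* thread the old head through the last slot */
    local_tables = buf;
    return buf;
}

/* Free every table by chasing the pointers hidden in the last slots. */
static void
free_local_tables(void)
{
    ID *table = local_tables;
    while (table) {
        unsigned int size = (unsigned int)*table;
        ID *next = (ID *)table[size + 1];
        free(table);
        table = next;
    }
    local_tables = NULL;
}

int
main(void)
{
    ID *t = push_local_table(2);
    t[1] = 101; t[2] = 102;           /* two local-variable IDs */
    push_local_table(1)[1] = 103;
    free_local_tables();              /* plain frees, no GC involvement */
    return 0;
}

Because the next pointer reuses a slot the table allocates anyway, putting a table on the list costs nothing extra, and the imemo tmpbuf wrapper objects (and the marking that kept them alive) can be dropped entirely.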
node.c | 36 +++++++++++++++++-------------------

@@ -1144,6 +1144,7 @@ typedef struct {
 struct node_buffer_struct {
     node_buffer_list_t unmarkable;
     node_buffer_list_t markable;
+    ID *local_tables;
     VALUE mark_hash;
 };
 
@@ -1169,6 +1170,7 @@ rb_node_buffer_new(void)
     node_buffer_t *nb = ruby_xmalloc(alloc_size);
     init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1]);
     init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size));
+    nb->local_tables = 0;
     nb->mark_hash = Qnil;
     return nb;
 }
@@ -1190,6 +1192,13 @@ rb_node_buffer_free(node_buffer_t *nb)
 {
     node_buffer_list_free(&nb->unmarkable);
     node_buffer_list_free(&nb->markable);
+    ID * local_table = nb->local_tables;
+    while (local_table) {
+        unsigned int size = (unsigned int)*local_table;
+        ID * next_table = (ID *)local_table[size + 1];
+        xfree(local_table);
+        local_table = next_table;
+    }
     xfree(nb);
 }
 
@@ -1223,7 +1232,6 @@ rb_ast_newnode(rb_ast_t *ast, enum node_type type)
       case NODE_DREGX:
       case NODE_DSYM:
       case NODE_ARGS:
-      case NODE_SCOPE:
      case NODE_ARYPTN:
       case NODE_FNDPTN:
         return ast_newnode_in_bucket(&nb->markable);
@@ -1232,6 +1240,14 @@ rb_ast_newnode(rb_ast_t *ast, enum node_type type)
     }
 }
 
+void
+rb_ast_add_local_table(rb_ast_t *ast, ID *buf)
+{
+    unsigned int size = (unsigned int)*buf;
+    buf[size + 1] = (ID)ast->node_buffer->local_tables;
+    ast->node_buffer->local_tables = buf;
+}
+
 void
 rb_ast_delete_node(rb_ast_t *ast, NODE *n)
 {
@@ -1278,15 +1294,6 @@ static void
 mark_ast_value(void *ctx, NODE * node)
 {
     switch (nd_type(node)) {
-      case NODE_SCOPE:
-        {
-            ID *buf = node->nd_tbl;
-            if (buf) {
-                unsigned int size = (unsigned int)*buf;
-                rb_gc_mark_movable((VALUE)buf[size + 1]);
-            }
-            break;
-        }
       case NODE_ARYPTN:
         {
             struct rb_ary_pattern_info *apinfo = node->nd_apinfo;
@@ -1324,15 +1331,6 @@ static void
 update_ast_value(void *ctx, NODE * node)
 {
     switch (nd_type(node)) {
-      case NODE_SCOPE:
-        {
-            ID *buf = node->nd_tbl;
-            if (buf) {
-                unsigned int size = (unsigned int)*buf;
-                buf[size + 1] = rb_gc_location((VALUE)buf[size + 1]);
-            }
-            break;
-        }
       case NODE_ARYPTN:
         {
             struct rb_ary_pattern_info *apinfo = node->nd_apinfo;
node.h | 2 ++

@@ -401,11 +401,13 @@ typedef struct rb_ast_body_struct {
 typedef struct rb_ast_struct {
     VALUE flags;
     node_buffer_t *node_buffer;
+    ID *local_lists;
     rb_ast_body_t body;
 } rb_ast_t;
 rb_ast_t *rb_ast_new(void);
 void rb_ast_mark(rb_ast_t*);
 void rb_ast_update_references(rb_ast_t*);
+void rb_ast_add_local_table(rb_ast_t*, ID *buf);
 void rb_ast_dispose(rb_ast_t*);
 void rb_ast_free(rb_ast_t*);
 size_t rb_ast_memsize(const rb_ast_t*);
parse.y | 12 ++----------

@@ -3050,11 +3050,9 @@ primary : literal
             ID id = internal_id(p);
             NODE *m = NEW_ARGS_AUX(0, 0, &NULL_LOC);
             NODE *args, *scope, *internal_var = NEW_DVAR(id, &@2);
-            VALUE tmpbuf = rb_imemo_tmpbuf_auto_free_pointer();
             ID *tbl = ALLOC_N(ID, 3);
-            rb_imemo_tmpbuf_set_ptr(tmpbuf, tbl);
             tbl[0] = 1 /* length of local var table */; tbl[1] = id /* internal id */;
-            tbl[2] = tmpbuf;
+            rb_ast_add_local_table(p->ast, tbl);
 
             switch (nd_type($2)) {
               case NODE_LASGN:
@@ -3074,7 +3072,6 @@ primary : literal
             /* {|*internal_id| <m> = internal_id; ... } */
             args = new_args(p, m, 0, id, 0, new_args_tail(p, 0, 0, 0, &@2), &@2);
             scope = NEW_NODE(NODE_SCOPE, tbl, $5, args, &@$);
-            RB_OBJ_WRITTEN(p->ast, Qnil, tmpbuf);
             $$ = NEW_FOR($4, scope, &@$);
             fixpos($$, $2);
             /*% %*/
@@ -12037,12 +12034,9 @@ local_tbl(struct parser_params *p)
     int cnt = cnt_args + cnt_vars;
     int i, j;
     ID *buf;
-    VALUE tbl = 0;
 
     if (cnt <= 0) return 0;
-    tbl = rb_imemo_tmpbuf_auto_free_pointer();
     buf = ALLOC_N(ID, cnt + 2);
-    rb_imemo_tmpbuf_set_ptr(tbl, buf);
     MEMCPY(buf+1, p->lvtbl->args->tbl, ID, cnt_args);
     /* remove IDs duplicated to warn shadowing */
     for (i = 0, j = cnt_args+1; i < cnt_vars; ++i) {
@@ -12053,11 +12047,9 @@ local_tbl(struct parser_params *p)
     }
     if (--j < cnt) {
         REALLOC_N(buf, ID, (cnt = j) + 2);
-        rb_imemo_tmpbuf_set_ptr(tbl, buf);
     }
     buf[0] = cnt;
-    buf[cnt + 1] = (ID)tbl;
-    RB_OBJ_WRITTEN(p->ast, Qnil, tbl);
+    rb_ast_add_local_table(p->ast, buf);
 
     return buf;
 }