Mirror of https://github.com/github/ruby.git
Tie lifetime of uJIT blocks to iseqs

* Tie lifetime of uJIT blocks to iseqs
  Blocks weren't being freed when iseqs are collected.
* Add rb_dary. Use it for method dependency table
* Keep track of blocks per iseq
  Remove global version_tbl
* Block version bookkeeping fix
* dary -> darray
* free ujit_blocks
* comment about size of ujit_blocks
This commit is contained in:
Parent: 148ab79cd7
Commit: c02517bacb
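In outline (a simplified standalone sketch with toy types, not the real ujit structures): each iseq now owns one slot per instruction index, each slot heads a singly linked list of block versions, and freeing the iseq frees every version it owns, which is what the new rb_ujit_iseq_free() in the diff below does with body->ujit_blocks.

#include <stdlib.h>

// Toy stand-ins for block_t and the per-iseq block table (illustration only).
struct toy_block {
    struct toy_block *next;        // next version for the same instruction index
};

struct toy_iseq {
    unsigned iseq_size;            // number of instruction slots
    struct toy_block **blocks;     // one version-list head per instruction index
};

// Mirrors the shape of rb_ujit_iseq_free(): walk every slot, free each chain,
// then free the table itself.
static void
toy_iseq_free(struct toy_iseq *iseq)
{
    for (unsigned idx = 0; idx < iseq->iseq_size; idx++) {
        struct toy_block *block = iseq->blocks[idx];
        while (block) {
            struct toy_block *next = block->next;
            free(block);           // the real code goes through ujit_free_block()
            block = next;
        }
    }
    free(iseq->blocks);            // the real code uses rb_darray_free()
}

int main(void)
{
    struct toy_iseq iseq = { .iseq_size = 4 };
    iseq.blocks = calloc(iseq.iseq_size, sizeof(*iseq.blocks));
    if (!iseq.blocks) return 1;
    iseq.blocks[0] = calloc(1, sizeof(**iseq.blocks));   // one compiled version at idx 0
    toy_iseq_free(&iseq);          // collecting the iseq now frees its blocks too
    return 0;
}

In the real change the slots live in body->ujit_blocks (an rb_darray field added to rb_iseq_constant_body in vm_core.h), and rb_iseq_free(), rb_iseq_mark() and rb_iseq_update_references() call the new rb_ujit_iseq_* hooks.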
common.mk | 34

@@ -1961,6 +1961,7 @@ ast.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 ast.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 ast.$(OBJEXT): {$(VPATH)}builtin.h
 ast.$(OBJEXT): {$(VPATH)}config.h
+ast.$(OBJEXT): {$(VPATH)}darray.h
 ast.$(OBJEXT): {$(VPATH)}defines.h
 ast.$(OBJEXT): {$(VPATH)}encoding.h
 ast.$(OBJEXT): {$(VPATH)}id.h
@@ -2338,6 +2339,7 @@ builtin.$(OBJEXT): {$(VPATH)}builtin.c
 builtin.$(OBJEXT): {$(VPATH)}builtin.h
 builtin.$(OBJEXT): {$(VPATH)}builtin_binary.inc
 builtin.$(OBJEXT): {$(VPATH)}config.h
+builtin.$(OBJEXT): {$(VPATH)}darray.h
 builtin.$(OBJEXT): {$(VPATH)}defines.h
 builtin.$(OBJEXT): {$(VPATH)}id.h
 builtin.$(OBJEXT): {$(VPATH)}intern.h
@@ -2528,7 +2530,7 @@ class.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 class.$(OBJEXT): {$(VPATH)}class.c
 class.$(OBJEXT): {$(VPATH)}config.h
 class.$(OBJEXT): {$(VPATH)}constant.h
-class.$(OBJEXT): {$(VPATH)}debug_counter.h
+class.$(OBJEXT): {$(VPATH)}darray.h
 class.$(OBJEXT): {$(VPATH)}defines.h
 class.$(OBJEXT): {$(VPATH)}encoding.h
 class.$(OBJEXT): {$(VPATH)}id.h
@@ -2921,6 +2923,7 @@ compile.$(OBJEXT): {$(VPATH)}builtin.h
 compile.$(OBJEXT): {$(VPATH)}compile.c
 compile.$(OBJEXT): {$(VPATH)}config.h
 compile.$(OBJEXT): {$(VPATH)}constant.h
+compile.$(OBJEXT): {$(VPATH)}darray.h
 compile.$(OBJEXT): {$(VPATH)}debug_counter.h
 compile.$(OBJEXT): {$(VPATH)}defines.h
 compile.$(OBJEXT): {$(VPATH)}encindex.h
@@ -3319,6 +3322,7 @@ cont.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 cont.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 cont.$(OBJEXT): {$(VPATH)}config.h
 cont.$(OBJEXT): {$(VPATH)}cont.c
+cont.$(OBJEXT): {$(VPATH)}darray.h
 cont.$(OBJEXT): {$(VPATH)}debug_counter.h
 cont.$(OBJEXT): {$(VPATH)}defines.h
 cont.$(OBJEXT): {$(VPATH)}eval_intern.h
@@ -3510,6 +3514,7 @@ debug.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
 debug.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 debug.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 debug.$(OBJEXT): {$(VPATH)}config.h
+debug.$(OBJEXT): {$(VPATH)}darray.h
 debug.$(OBJEXT): {$(VPATH)}debug.c
 debug.$(OBJEXT): {$(VPATH)}debug_counter.h
 debug.$(OBJEXT): {$(VPATH)}defines.h
@@ -5180,6 +5185,7 @@ error.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 error.$(OBJEXT): {$(VPATH)}builtin.h
 error.$(OBJEXT): {$(VPATH)}config.h
 error.$(OBJEXT): {$(VPATH)}constant.h
+error.$(OBJEXT): {$(VPATH)}darray.h
 error.$(OBJEXT): {$(VPATH)}defines.h
 error.$(OBJEXT): {$(VPATH)}encoding.h
 error.$(OBJEXT): {$(VPATH)}error.c
@@ -5390,6 +5396,7 @@ eval.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 eval.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 eval.$(OBJEXT): {$(VPATH)}config.h
 eval.$(OBJEXT): {$(VPATH)}constant.h
+eval.$(OBJEXT): {$(VPATH)}darray.h
 eval.$(OBJEXT): {$(VPATH)}debug_counter.h
 eval.$(OBJEXT): {$(VPATH)}defines.h
 eval.$(OBJEXT): {$(VPATH)}encoding.h
@@ -5834,6 +5841,7 @@ gc.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 gc.$(OBJEXT): {$(VPATH)}builtin.h
 gc.$(OBJEXT): {$(VPATH)}config.h
 gc.$(OBJEXT): {$(VPATH)}constant.h
+gc.$(OBJEXT): {$(VPATH)}darray.h
 gc.$(OBJEXT): {$(VPATH)}debug.h
 gc.$(OBJEXT): {$(VPATH)}debug_counter.h
 gc.$(OBJEXT): {$(VPATH)}defines.h
@@ -6835,6 +6843,7 @@ iseq.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 iseq.$(OBJEXT): {$(VPATH)}builtin.h
 iseq.$(OBJEXT): {$(VPATH)}config.h
 iseq.$(OBJEXT): {$(VPATH)}constant.h
+iseq.$(OBJEXT): {$(VPATH)}darray.h
 iseq.$(OBJEXT): {$(VPATH)}debug_counter.h
 iseq.$(OBJEXT): {$(VPATH)}defines.h
 iseq.$(OBJEXT): {$(VPATH)}encoding.h
@@ -7051,6 +7060,7 @@ load.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 load.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 load.$(OBJEXT): {$(VPATH)}config.h
 load.$(OBJEXT): {$(VPATH)}constant.h
+load.$(OBJEXT): {$(VPATH)}darray.h
 load.$(OBJEXT): {$(VPATH)}defines.h
 load.$(OBJEXT): {$(VPATH)}dln.h
 load.$(OBJEXT): {$(VPATH)}encoding.h
@@ -8281,6 +8291,7 @@ miniinit.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 miniinit.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 miniinit.$(OBJEXT): {$(VPATH)}builtin.h
 miniinit.$(OBJEXT): {$(VPATH)}config.h
+miniinit.$(OBJEXT): {$(VPATH)}darray.h
 miniinit.$(OBJEXT): {$(VPATH)}defines.h
 miniinit.$(OBJEXT): {$(VPATH)}dir.rb
 miniinit.$(OBJEXT): {$(VPATH)}encoding.h
@@ -8532,6 +8543,7 @@ mjit.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 mjit.$(OBJEXT): {$(VPATH)}builtin.h
 mjit.$(OBJEXT): {$(VPATH)}config.h
 mjit.$(OBJEXT): {$(VPATH)}constant.h
+mjit.$(OBJEXT): {$(VPATH)}darray.h
 mjit.$(OBJEXT): {$(VPATH)}debug.h
 mjit.$(OBJEXT): {$(VPATH)}debug_counter.h
 mjit.$(OBJEXT): {$(VPATH)}defines.h
@@ -8762,6 +8774,7 @@ mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 mjit_compile.$(OBJEXT): {$(VPATH)}builtin.h
 mjit_compile.$(OBJEXT): {$(VPATH)}config.h
 mjit_compile.$(OBJEXT): {$(VPATH)}constant.h
+mjit_compile.$(OBJEXT): {$(VPATH)}darray.h
 mjit_compile.$(OBJEXT): {$(VPATH)}debug_counter.h
 mjit_compile.$(OBJEXT): {$(VPATH)}defines.h
 mjit_compile.$(OBJEXT): {$(VPATH)}id.h
@@ -8958,6 +8971,7 @@ node.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 node.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 node.$(OBJEXT): {$(VPATH)}config.h
 node.$(OBJEXT): {$(VPATH)}constant.h
+node.$(OBJEXT): {$(VPATH)}darray.h
 node.$(OBJEXT): {$(VPATH)}defines.h
 node.$(OBJEXT): {$(VPATH)}id.h
 node.$(OBJEXT): {$(VPATH)}id_table.h
@@ -9970,6 +9984,7 @@ proc.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
 proc.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 proc.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 proc.$(OBJEXT): {$(VPATH)}config.h
+proc.$(OBJEXT): {$(VPATH)}darray.h
 proc.$(OBJEXT): {$(VPATH)}defines.h
 proc.$(OBJEXT): {$(VPATH)}encoding.h
 proc.$(OBJEXT): {$(VPATH)}eval_intern.h
@@ -10181,6 +10196,7 @@ process.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 process.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 process.$(OBJEXT): {$(VPATH)}config.h
 process.$(OBJEXT): {$(VPATH)}constant.h
+process.$(OBJEXT): {$(VPATH)}darray.h
 process.$(OBJEXT): {$(VPATH)}debug_counter.h
 process.$(OBJEXT): {$(VPATH)}defines.h
 process.$(OBJEXT): {$(VPATH)}dln.h
@@ -10397,6 +10413,7 @@ ractor.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 ractor.$(OBJEXT): {$(VPATH)}builtin.h
 ractor.$(OBJEXT): {$(VPATH)}config.h
 ractor.$(OBJEXT): {$(VPATH)}constant.h
+ractor.$(OBJEXT): {$(VPATH)}darray.h
 ractor.$(OBJEXT): {$(VPATH)}debug.h
 ractor.$(OBJEXT): {$(VPATH)}debug_counter.h
 ractor.$(OBJEXT): {$(VPATH)}defines.h
@@ -12341,6 +12358,7 @@ ruby.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 ruby.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 ruby.$(OBJEXT): {$(VPATH)}config.h
 ruby.$(OBJEXT): {$(VPATH)}constant.h
+ruby.$(OBJEXT): {$(VPATH)}darray.h
 ruby.$(OBJEXT): {$(VPATH)}debug_counter.h
 ruby.$(OBJEXT): {$(VPATH)}defines.h
 ruby.$(OBJEXT): {$(VPATH)}dln.h
@@ -12544,6 +12562,7 @@ scheduler.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
 scheduler.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 scheduler.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 scheduler.$(OBJEXT): {$(VPATH)}config.h
+scheduler.$(OBJEXT): {$(VPATH)}darray.h
 scheduler.$(OBJEXT): {$(VPATH)}defines.h
 scheduler.$(OBJEXT): {$(VPATH)}encoding.h
 scheduler.$(OBJEXT): {$(VPATH)}fiber/scheduler.h
@@ -12904,6 +12923,7 @@ signal.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
 signal.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 signal.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 signal.$(OBJEXT): {$(VPATH)}config.h
+signal.$(OBJEXT): {$(VPATH)}darray.h
 signal.$(OBJEXT): {$(VPATH)}debug_counter.h
 signal.$(OBJEXT): {$(VPATH)}defines.h
 signal.$(OBJEXT): {$(VPATH)}encoding.h
@@ -13890,6 +13910,7 @@ struct.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 struct.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 struct.$(OBJEXT): {$(VPATH)}builtin.h
 struct.$(OBJEXT): {$(VPATH)}config.h
+struct.$(OBJEXT): {$(VPATH)}darray.h
 struct.$(OBJEXT): {$(VPATH)}defines.h
 struct.$(OBJEXT): {$(VPATH)}encoding.h
 struct.$(OBJEXT): {$(VPATH)}id.h
@@ -14295,6 +14316,7 @@ thread.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
 thread.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 thread.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 thread.$(OBJEXT): {$(VPATH)}config.h
+thread.$(OBJEXT): {$(VPATH)}darray.h
 thread.$(OBJEXT): {$(VPATH)}debug.h
 thread.$(OBJEXT): {$(VPATH)}debug_counter.h
 thread.$(OBJEXT): {$(VPATH)}defines.h
@@ -15083,6 +15105,7 @@ ujit_codegen.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 ujit_codegen.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 ujit_codegen.$(OBJEXT): {$(VPATH)}builtin.h
 ujit_codegen.$(OBJEXT): {$(VPATH)}config.h
+ujit_codegen.$(OBJEXT): {$(VPATH)}darray.h
 ujit_codegen.$(OBJEXT): {$(VPATH)}debug_counter.h
 ujit_codegen.$(OBJEXT): {$(VPATH)}defines.h
 ujit_codegen.$(OBJEXT): {$(VPATH)}id.h
@@ -15474,6 +15497,7 @@ ujit_core.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 ujit_core.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 ujit_core.$(OBJEXT): {$(VPATH)}builtin.h
 ujit_core.$(OBJEXT): {$(VPATH)}config.h
+ujit_core.$(OBJEXT): {$(VPATH)}darray.h
 ujit_core.$(OBJEXT): {$(VPATH)}debug_counter.h
 ujit_core.$(OBJEXT): {$(VPATH)}defines.h
 ujit_core.$(OBJEXT): {$(VPATH)}id.h
@@ -15671,6 +15695,7 @@ ujit_iface.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 ujit_iface.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 ujit_iface.$(OBJEXT): {$(VPATH)}builtin.h
 ujit_iface.$(OBJEXT): {$(VPATH)}config.h
+ujit_iface.$(OBJEXT): {$(VPATH)}darray.h
 ujit_iface.$(OBJEXT): {$(VPATH)}debug_counter.h
 ujit_iface.$(OBJEXT): {$(VPATH)}defines.h
 ujit_iface.$(OBJEXT): {$(VPATH)}id.h
@@ -16050,6 +16075,7 @@ variable.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 variable.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 variable.$(OBJEXT): {$(VPATH)}config.h
 variable.$(OBJEXT): {$(VPATH)}constant.h
+variable.$(OBJEXT): {$(VPATH)}darray.h
 variable.$(OBJEXT): {$(VPATH)}debug_counter.h
 variable.$(OBJEXT): {$(VPATH)}defines.h
 variable.$(OBJEXT): {$(VPATH)}encoding.h
@@ -16256,6 +16282,7 @@ version.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 version.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 version.$(OBJEXT): {$(VPATH)}builtin.h
 version.$(OBJEXT): {$(VPATH)}config.h
+version.$(OBJEXT): {$(VPATH)}darray.h
 version.$(OBJEXT): {$(VPATH)}debug_counter.h
 version.$(OBJEXT): {$(VPATH)}defines.h
 version.$(OBJEXT): {$(VPATH)}id.h
@@ -16468,6 +16495,7 @@ vm.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 vm.$(OBJEXT): {$(VPATH)}builtin.h
 vm.$(OBJEXT): {$(VPATH)}config.h
 vm.$(OBJEXT): {$(VPATH)}constant.h
+vm.$(OBJEXT): {$(VPATH)}darray.h
 vm.$(OBJEXT): {$(VPATH)}debug_counter.h
 vm.$(OBJEXT): {$(VPATH)}defines.h
 vm.$(OBJEXT): {$(VPATH)}defs/opt_operand.def
@@ -16694,6 +16722,7 @@ vm_backtrace.$(OBJEXT): {$(VPATH)}backward/2/long_long.h
 vm_backtrace.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 vm_backtrace.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 vm_backtrace.$(OBJEXT): {$(VPATH)}config.h
+vm_backtrace.$(OBJEXT): {$(VPATH)}darray.h
 vm_backtrace.$(OBJEXT): {$(VPATH)}debug.h
 vm_backtrace.$(OBJEXT): {$(VPATH)}defines.h
 vm_backtrace.$(OBJEXT): {$(VPATH)}encoding.h
@@ -16893,6 +16922,7 @@ vm_dump.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 vm_dump.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 vm_dump.$(OBJEXT): {$(VPATH)}config.h
 vm_dump.$(OBJEXT): {$(VPATH)}constant.h
+vm_dump.$(OBJEXT): {$(VPATH)}darray.h
 vm_dump.$(OBJEXT): {$(VPATH)}defines.h
 vm_dump.$(OBJEXT): {$(VPATH)}gc.h
 vm_dump.$(OBJEXT): {$(VPATH)}id.h
@@ -17083,6 +17113,7 @@ vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 vm_sync.$(OBJEXT): {$(VPATH)}config.h
 vm_sync.$(OBJEXT): {$(VPATH)}constant.h
+vm_sync.$(OBJEXT): {$(VPATH)}darray.h
 vm_sync.$(OBJEXT): {$(VPATH)}debug_counter.h
 vm_sync.$(OBJEXT): {$(VPATH)}defines.h
 vm_sync.$(OBJEXT): {$(VPATH)}gc.h
@@ -17282,6 +17313,7 @@ vm_trace.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
 vm_trace.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
 vm_trace.$(OBJEXT): {$(VPATH)}builtin.h
 vm_trace.$(OBJEXT): {$(VPATH)}config.h
+vm_trace.$(OBJEXT): {$(VPATH)}darray.h
 vm_trace.$(OBJEXT): {$(VPATH)}debug.h
 vm_trace.$(OBJEXT): {$(VPATH)}debug_counter.h
 vm_trace.$(OBJEXT): {$(VPATH)}defines.h
darray.h (new file)

@@ -0,0 +1,141 @@
#ifndef RUBY_DARRAY_H
#define RUBY_DARRAY_H

#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

// Type for a dynamic array. Use to declare a dynamic array.
// It is a pointer so it fits in st_table nicely. Designed
// to be fairly type-safe.
//
// NULL is a valid empty dynamic array.
//
// Example:
//     rb_darray(char) char_array = NULL;
//     if (!rb_darray_append(&char_array, 'e')) abort();
//     printf("pushed %c\n", *rb_darray_ref(char_array, 0));
//     rb_darray_free(char_array);
//
#define rb_darray(T) struct { rb_darray_meta_t meta; T data[]; } *

// Copy an element out of the array. Warning: not bounds checked.
//
// T rb_darray_get(rb_darray(T) ary, int32_t idx);
//
#define rb_darray_get(ary, idx) ((ary)->data[(idx)])

// Assign to an element. Warning: not bounds checked.
//
// void rb_darray_set(rb_darray(T) ary, int32_t idx, T element);
//
#define rb_darray_set(ary, idx, element) ((ary)->data[(idx)] = (element))

// Get a pointer to an element. Warning: not bounds checked.
//
// T *rb_darray_ref(rb_darray(T) ary, int32_t idx);
//
#define rb_darray_ref(ary, idx) (&((ary)->data[(idx)]))

// Copy a new element into the array. Return 1 on success and 0 on failure.
// ptr_to_ary is evaluated multiple times.
//
// bool rb_darray_append(rb_darray(T) *ptr_to_ary, T element);
//
#define rb_darray_append(ptr_to_ary, element) ( \
    rb_darray_ensure_space((ptr_to_ary)) ? ( \
        rb_darray_set(*(ptr_to_ary), \
                      (*(ptr_to_ary))->meta.size, \
                      (element)), \
        ++((*(ptr_to_ary))->meta.size), \
        1 \
    ) : 0)

// Iterate over items of the array in a for loop
//
#define rb_darray_foreach(ary, idx_name, elem_ptr_var) \
    for (int idx_name = 0; idx_name < rb_darray_size(ary) && ((elem_ptr_var) = rb_darray_ref(ary, idx_name)); ++idx_name)

typedef struct rb_darray_meta {
    int32_t size;
    int32_t capa;
} rb_darray_meta_t;

// Get the size of the dynamic array.
//
static inline int32_t
rb_darray_size(const void *ary)
{
    const rb_darray_meta_t *meta = ary;
    return meta ? meta->size : 0;
}

// Get the capacity of the dynamic array.
//
static inline int32_t
rb_darray_capa(const void *ary)
{
    const rb_darray_meta_t *meta = ary;
    return meta ? meta->capa : 0;
}

// Free the dynamic array.
//
static inline void
rb_darray_free(void *ary)
{
    free(ary);
}

// Remove the last element of the array.
//
#define rb_darray_pop_back(ary) ((ary)->meta.size--)

// Internal macro
// Ensure there is space for one more element. Return 1 on success and 0 on failure.
// `ptr_to_ary` is evaluated multiple times.
#define rb_darray_ensure_space(ptr_to_ary) ( \
    (rb_darray_capa(*(ptr_to_ary)) > rb_darray_size(*(ptr_to_ary))) ? \
        1 : \
        rb_darray_double(ptr_to_ary, sizeof((*(ptr_to_ary))->data[0])))

// Internal function
static inline int
rb_darray_double(void *ptr_to_ary, size_t element_size)
{
    rb_darray_meta_t **ptr_to_ptr_to_meta = ptr_to_ary;
    const rb_darray_meta_t *meta = *ptr_to_ptr_to_meta;
    int32_t current_capa = rb_darray_capa(meta);

    int32_t new_capa;
    // Calculate new capacity
    if (current_capa == 0) {
        new_capa = 1;
    }
    else {
        int64_t doubled = 2 * (int64_t)current_capa;
        new_capa = (int32_t)doubled;
        if (new_capa != doubled) return 0;
    }

    // Calculate new buffer size
    size_t current_buffer_size = element_size * (size_t)current_capa + (meta ? sizeof(*meta) : 0);
    size_t new_buffer_size = element_size * (size_t)new_capa + sizeof(*meta);
    if (new_buffer_size <= current_buffer_size) return 0;

    rb_darray_meta_t *doubled_ary = realloc(*ptr_to_ptr_to_meta, new_buffer_size);
    if (!doubled_ary) return 0;

    if (meta == NULL) {
        // First allocation. Initialize size. On subsequent allocations
        // realloc takes care of carrying over the size.
        doubled_ary->size = 0;
    }

    doubled_ary->capa = new_capa;

    *ptr_to_ptr_to_meta = doubled_ary;
    return 1;
}

#endif /* RUBY_DARRAY_H */
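A minimal usage sketch of the API above (not part of the commit), assuming this darray.h is on the include path:

#include <stdio.h>
#include "darray.h"

int main(void)
{
    // NULL is a valid empty dynamic array.
    rb_darray(int) ints = NULL;

    for (int i = 0; i < 5; i++) {
        if (!rb_darray_append(&ints, i * i)) return 1;   // 0 means allocation failure
    }

    int *item;
    rb_darray_foreach(ints, idx, item) {
        printf("ints[%d] = %d\n", idx, *item);
    }

    rb_darray_pop_back(ints);    // drop the last element
    printf("size=%d capa=%d\n", (int)rb_darray_size(ints), (int)rb_darray_capa(ints));

    rb_darray_free(ints);
    return 0;
}

rb_darray_append() reports allocation failure by returning 0, which is why call sites in this commit either check the result or cast it to (void).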
ext/coverage/depend

@@ -164,6 +164,7 @@ coverage.o: $(top_srcdir)/ccan/check_type/check_type.h
 coverage.o: $(top_srcdir)/ccan/container_of/container_of.h
 coverage.o: $(top_srcdir)/ccan/list/list.h
 coverage.o: $(top_srcdir)/ccan/str/str.h
+coverage.o: $(top_srcdir)/darray.h
 coverage.o: $(top_srcdir)/gc.h
 coverage.o: $(top_srcdir)/internal.h
 coverage.o: $(top_srcdir)/internal/array.h

ext/objspace/depend

@@ -540,6 +540,7 @@ objspace_dump.o: $(top_srcdir)/ccan/check_type/check_type.h
 objspace_dump.o: $(top_srcdir)/ccan/container_of/container_of.h
 objspace_dump.o: $(top_srcdir)/ccan/list/list.h
 objspace_dump.o: $(top_srcdir)/ccan/str/str.h
+objspace_dump.o: $(top_srcdir)/darray.h
 objspace_dump.o: $(top_srcdir)/gc.h
 objspace_dump.o: $(top_srcdir)/internal.h
 objspace_dump.o: $(top_srcdir)/internal/array.h
iseq.c | 3

@@ -109,6 +109,7 @@ rb_iseq_free(const rb_iseq_t *iseq)
     if (iseq && iseq->body) {
         struct rb_iseq_constant_body *const body = iseq->body;
         mjit_free_iseq(iseq); /* Notify MJIT */
+        rb_ujit_iseq_free(body);
         ruby_xfree((void *)body->iseq_encoded);
         ruby_xfree((void *)body->insns_info.body);
         if (body->insns_info.positions) ruby_xfree((void *)body->insns_info.positions);
@@ -321,6 +322,7 @@ rb_iseq_update_references(rb_iseq_t *iseq)
 #if USE_MJIT
         mjit_update_references(iseq);
 #endif
+        rb_ujit_iseq_update_references(body);
     }
 }

@@ -401,6 +403,7 @@ rb_iseq_mark(const rb_iseq_t *iseq)
 #if USE_MJIT
         mjit_mark_cc_entries(body);
 #endif
+        rb_ujit_iseq_mark(body);
     }

     if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
ujit.h | 3

@@ -54,5 +54,8 @@ void rb_ujit_compile_iseq(const rb_iseq_t *iseq);
 void rb_ujit_init(struct rb_ujit_options *options);
 void rb_ujit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop);
 void rb_ujit_constant_state_changed(void);
+void rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body);
+void rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body);
+void rb_ujit_iseq_free(const struct rb_iseq_constant_body *body);

 #endif // #ifndef UJIT_H
ujit_core.c | 117

@@ -16,12 +16,6 @@
 // Maximum number of branch instructions we can track
 #define MAX_BRANCHES 32768

-// Default versioning context (no type information)
-const ctx_t DEFAULT_CTX = { { 0 }, 0 };
-
-// Table of block versions indexed by (iseq, index) tuples
-st_table *version_tbl;
-
 // Registered branch entries
 branch_t branch_entries[MAX_BRANCHES];
 uint32_t num_branches = 0;
@@ -150,25 +144,55 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst)
     return diff;
 }

-// Add a block version to the map
-static void add_block_version(blockid_t blockid, block_t* block)
+static block_t *
+get_first_version(const rb_iseq_t *iseq, unsigned idx)
+{
+    struct rb_iseq_constant_body *body = iseq->body;
+    if (rb_darray_size(body->ujit_blocks) == 0) {
+        return NULL;
+    }
+    RUBY_ASSERT((unsigned)rb_darray_size(body->ujit_blocks) == body->iseq_size);
+    return rb_darray_get(body->ujit_blocks, idx);
+}
+
+// Add a block version to the map. Block should be fully constructed
+static void
+add_block_version(blockid_t blockid, block_t* block)
 {
     // Function entry blocks must have stack size 0
     RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0));
+    const rb_iseq_t *iseq = block->blockid.iseq;
+    struct rb_iseq_constant_body *body = iseq->body;
+
+    // Ensure ujit_blocks is initialized
+    if (rb_darray_size(body->ujit_blocks) == 0) {
+        // Initialize ujit_blocks to be as wide as body->iseq_encoded
+        // TODO: add resize API for dary
+        while ((unsigned)rb_darray_size(body->ujit_blocks) < body->iseq_size) {
+            (void)rb_darray_append(&body->ujit_blocks, NULL);
+        }
+    }
+
+    block_t *first_version = get_first_version(iseq, blockid.idx);

     // If there exists a version for this block id
-    block_t* first_version = NULL;
-    st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_version);
-
-    // Link to the next version in a linked list
     if (first_version != NULL) {
+        // Link to the next version in a linked list
+        RUBY_ASSERT(block->next == NULL);
         block->next = first_version;
     }

-    // Add the block version to the map
-    st_insert(version_tbl, (st_data_t)&block->blockid, (st_data_t)block);
+    // Make new block the first version
+    rb_darray_set(body->ujit_blocks, blockid.idx, block);
+    RUBY_ASSERT(find_block_version(blockid, &block->ctx) != NULL);
+
+    {
+        // By writing the new block to the iseq, the iseq now
+        // contains new references to Ruby objects. Run write barriers.
+        RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.iseq);
+        RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.cc);
+        RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.cme);
+    }
 }

 // Add an incoming branch for a given block version
@@ -185,15 +209,11 @@ static void add_incoming(block_t* p_block, uint32_t branch_idx)
 // Count the number of block versions matching a given blockid
 static size_t count_block_versions(blockid_t blockid)
 {
-    // If there exists a version for this block id
-    block_t* first_version;
-    if (!rb_st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_version))
-        return 0;
-
     size_t count = 0;
+    block_t *first_version = get_first_version(blockid.iseq, blockid.idx);

     // For each version matching the blockid
-    for (block_t* version = first_version; version != NULL; version = version->next)
+    for (block_t *version = first_version; version != NULL; version = version->next)
     {
         count += 1;
     }
@@ -204,10 +224,10 @@ static size_t count_block_versions(blockid_t blockid)
 // Retrieve a basic block version for an (iseq, idx) tuple
 block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
 {
+    block_t *first_version = get_first_version(blockid.iseq, blockid.idx);
+
     // If there exists a version for this block id
-    block_t* first_version;
-    if (!rb_st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_version))
-        return NULL;
+    if (!first_version) return NULL;

     // Best match found
     block_t* best_version = NULL;
@@ -559,32 +579,37 @@ void gen_direct_jump(
     branch_entries[branch_idx] = branch_entry;
 }

+// Remove all references to a block then free it.
+void
+ujit_free_block(block_t *block)
+{
+    ujit_unlink_method_lookup_dependency(block);
+
+    free(block->incoming);
+    free(block);
+}
+
 // Invalidate one specific block version
 void
 invalidate_block_version(block_t* block)
 {
+    const rb_iseq_t *iseq = block->blockid.iseq;
+
     fprintf(stderr, "invalidating block (%p, %d)\n", block->blockid.iseq, block->blockid.idx);
     fprintf(stderr, "block=%p\n", block);

     // Find the first version for this blockid
-    block_t* first_block = NULL;
-    rb_st_lookup(version_tbl, (st_data_t)&block->blockid, (st_data_t*)&first_block);
+    block_t *first_block = get_first_version(iseq, block->blockid.idx);
     RUBY_ASSERT(first_block != NULL);

-    // Remove the version object from the map so we can re-generate stubs
-    if (first_block == block)
-    {
-        st_data_t key = (st_data_t)&block->blockid;
-        int success = st_delete(version_tbl, &key, NULL);
-        RUBY_ASSERT(success);
+    // Remove references to this block
+    if (first_block == block) {
+        // Make the next block the new first version
+        rb_darray_set(iseq->body->ujit_blocks, block->blockid.idx, block->next);
     }
-    else
-    {
+    else {
         bool deleted = false;
-        for (block_t* cur = first_block; cur != NULL; cur = cur->next)
-        {
-            if (cur->next == block)
-            {
+        for (block_t* cur = first_block; cur != NULL; cur = cur->next) {
+            if (cur->next == block) {
                 cur->next = cur->next->next;
                 break;
             }
@@ -635,9 +660,9 @@ invalidate_block_version(block_t* block)
         }
     }

-    // If the block is an entry point, it needs to be unmapped from its iseq
-    const rb_iseq_t* iseq = block->blockid.iseq;
     uint32_t idx = block->blockid.idx;
+    // FIXME: the following says "if", but it's unconditional.
+    // If the block is an entry point, it needs to be unmapped from its iseq
     VALUE* entry_pc = &iseq->body->iseq_encoded[idx];
     int entry_opcode = opcode_at_pc(iseq, entry_pc);

@@ -654,9 +679,7 @@ invalidate_block_version(block_t* block)
     // FIXME:
     // Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub.

-    // Free the old block version object
-    free(block->incoming);
-    free(block);
+    ujit_free_block(block);

     fprintf(stderr, "invalidation done\n");
 }
@@ -678,14 +701,8 @@ st_index_t blockid_hash(st_data_t arg)
     return hash0 ^ hash1;
 }

-static const struct st_hash_type hashtype_blockid = {
-    blockid_cmp,
-    blockid_hash,
-};
-
 void
 ujit_init_core(void)
 {
-    // Initialize the version hash table
-    version_tbl = st_init_table(&hashtype_blockid);
+    // Nothing yet
 }
ujit_core.h | 16

@@ -100,7 +100,7 @@ Basic block version
 Represents a portion of an iseq compiled with a given context
 Note: care must be taken to minimize the size of block_t objects
 */
-typedef struct BlockVersion
+typedef struct ujit_block_version
 {
     // Bytecode sequence (iseq, idx) this is a version of
     blockid_t blockid;
@@ -116,12 +116,21 @@ typedef struct BlockVersion
     uint32_t end_pos;

     // List of incoming branches indices
-    uint32_t* incoming;
+    uint32_t *incoming;
     uint32_t num_incoming;

     // Next block version for this blockid (singly-linked list)
-    struct BlockVersion* next;
+    struct ujit_block_version *next;
+
+    // List node for all block versions in an iseq
+    struct list_node iseq_block_node;
+
+    // GC managed objects that this block depend on
+    struct {
+        VALUE cc;
+        VALUE cme;
+        VALUE iseq;
+    } dependencies;
 } block_t;

 // Context object methods
@@ -135,6 +144,7 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst);
 block_t* find_block_version(blockid_t blockid, const ctx_t* ctx);
 block_t* gen_block_version(blockid_t blockid, const ctx_t* ctx);
 uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx);
+void ujit_free_block(block_t *block);

 void gen_branch(
     const ctx_t* src_ctx,
ujit_iface.c | 259

@@ -14,6 +14,7 @@
 #include "ujit_core.h"
 #include "ujit_hooks.inc"
 #include "ujit.rbinc"
+#include "darray.h"

 #if HAVE_LIBCAPSTONE
 #include <capstone/capstone.h>
@@ -30,7 +31,6 @@ int64_t rb_ujit_exec_insns_count = 0;
 static int64_t exit_op_count[VM_INSTRUCTION_SIZE] = { 0 };
 static int64_t compiled_iseq_count = 0;

-extern st_table * version_tbl;
 extern codeblock_t *cb;
 extern codeblock_t *ocb;
 // Hash table of encoded instructions
@@ -131,99 +131,79 @@ struct ujit_root_struct {};
 // is only valid when cme_or_cc is valid
 static st_table *method_lookup_dependency;

-struct compiled_region_array {
-    int32_t size;
-    int32_t capa;
-    struct compiled_region {
-        block_t *block;
-        const struct rb_callcache *cc;
-        const rb_callable_method_entry_t *cme;
-    } data[];
+struct compiled_region {
+    block_t *block;
 };

-// Add an element to a region array, or allocate a new region array.
-static struct compiled_region_array *
-add_compiled_region(struct compiled_region_array *array, struct compiled_region *region)
-{
-    if (!array) {
-        // Allocate a brand new array with space for one
-        array = malloc(sizeof(*array) + sizeof(array->data[0]));
-        if (!array) {
-            return NULL;
-        }
-        array->size = 0;
-        array->capa = 1;
-    }
-    if (array->size == INT32_MAX) {
-        return NULL;
-    }
-    // Check if the region is already present
-    for (int32_t i = 0; i < array->size; i++) {
-        if (array->data[i].block == region->block && array->data[i].cc == region->cc && array->data[i].cme == region->cme) {
-            return array;
-        }
-    }
-    if (array->size + 1 > array->capa) {
-        // Double the array's capacity.
-        int64_t double_capa = ((int64_t)array->capa) * 2;
-        int32_t new_capa = (int32_t)double_capa;
-        if (new_capa != double_capa) {
-            return NULL;
-        }
-        array = realloc(array, sizeof(*array) + new_capa * sizeof(array->data[0]));
-        if (array == NULL) {
-            return NULL;
-        }
-        array->capa = new_capa;
-    }
-
-    array->data[array->size] = *region;
-    array->size++;
-    return array;
-}
+typedef rb_darray(struct compiled_region) block_array_t;

 static int
 add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
 {
     struct compiled_region *region = (struct compiled_region *)data;

-    struct compiled_region_array *regions = NULL;
+    block_array_t regions = NULL;
     if (existing) {
-        regions = (struct compiled_region_array *)*value;
+        regions = (block_array_t )*value;
     }
-    regions = add_compiled_region(regions, region);
-    if (!regions) {
+    if (!rb_darray_append(&regions, *region)) {
         rb_bug("ujit: failed to add method lookup dependency"); // TODO: we could bail out of compiling instead
     }

     *value = (st_data_t)regions;
     return ST_CONTINUE;
 }

 // Remember that the currently compiling region is only valid while cme and cc are valid
 void
-assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t* block)
+assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t *block)
 {
     RUBY_ASSERT(block != NULL);
-    struct compiled_region region = { .block = block, .cc = cc, .cme = cme };
+    RUBY_ASSERT(block->dependencies.cc == 0 && block->dependencies.cme == 0);
+    struct compiled_region region = { .block = block };
     st_update(method_lookup_dependency, (st_data_t)cme, add_lookup_dependency_i, (st_data_t)&region);
+    block->dependencies.cme = (VALUE)cme;
     st_update(method_lookup_dependency, (st_data_t)cc, add_lookup_dependency_i, (st_data_t)&region);
-    // FIXME: This is a leak! When either the cme or the cc become invalid, the other also needs to go
+    block->dependencies.cc = (VALUE)cc;
 }

 static int
 ujit_root_mark_i(st_data_t k, st_data_t v, st_data_t ignore)
 {
-    // FIXME: This leaks everything that end up in the dependency table!
-    // One way to deal with this is with weak references...
-    rb_gc_mark((VALUE)k);
-    struct compiled_region_array *regions = (void *)v;
-    for (int32_t i = 0; i < regions->size; i++) {
-        rb_gc_mark((VALUE)regions->data[i].block->blockid.iseq);
-    }
+    // Lifetime notes: cc and cme get added in pairs into the table. One of
+    // them should become invalid before dying. When one of them invalidate we
+    // remove the pair from the table. Blocks remove themself from the table
+    // when they die.
+    rb_gc_mark_movable((VALUE)k);

     return ST_CONTINUE;
 }

+static int
+method_lookup_dep_table_update_keys(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
+{
+    *key = rb_gc_location(rb_gc_location((VALUE)*key));
+
+    return ST_CONTINUE;
+}
+
+static int
+replace_all(st_data_t key, st_data_t value, st_data_t argp, int error)
+{
+    return ST_REPLACE;
+}
+
+// GC callback during compaction
+static void
+ujit_root_update_references(void *ptr)
+{
+    if (method_lookup_dependency) {
+        if (st_foreach_with_replace(method_lookup_dependency, replace_all, method_lookup_dep_table_update_keys, 0)) {
+            RUBY_ASSERT(false);
+        }
+    }
+}
+
 // GC callback during mark phase
 static void
 ujit_root_mark(void *ptr)
@@ -251,7 +231,7 @@ ujit_root_memsize(const void *ptr)
 // TODO: make this write barrier protected
 static const rb_data_type_t ujit_root_type = {
     "ujit_root",
-    {ujit_root_mark, ujit_root_free, ujit_root_memsize, },
+    {ujit_root_mark, ujit_root_free, ujit_root_memsize, ujit_root_update_references},
     0, 0, RUBY_TYPED_FREE_IMMEDIATELY
 };

@@ -266,48 +246,56 @@ rb_ujit_method_lookup_change(VALUE cme_or_cc)

     RUBY_ASSERT(IMEMO_TYPE_P(cme_or_cc, imemo_ment) || IMEMO_TYPE_P(cme_or_cc, imemo_callcache));

-    st_data_t image, other_image;
-    if (st_lookup(method_lookup_dependency, (st_data_t)cme_or_cc, &image)) {
-        struct compiled_region_array *array = (void *)image;
+    // Invalidate all regions that depend on the cme or cc
+    st_data_t key = (st_data_t)cme_or_cc, image;
+    if (st_delete(method_lookup_dependency, &key, &image)) {
+        block_array_t array = (void *)image;
+        struct compiled_region *elem;

-        // Invalidate all regions that depend on the cme or cc
-        for (int32_t i = 0; i < array->size; i++) {
-            struct compiled_region *region = &array->data[i];
-
-            VALUE other_key;
-            if (IMEMO_TYPE_P(cme_or_cc, imemo_ment)) {
-                other_key = (VALUE)region->cc;
-            }
-            else {
-                other_key = (VALUE)region->cme;
-            }
-
-            if (!st_lookup(method_lookup_dependency, (st_data_t)other_key, &other_image)) {
-                // See assume_method_lookup_stable() for why this should always hit.
-                rb_bug("method lookup dependency bookkeeping bug");
-            }
-            struct compiled_region_array *other_region_array = (void *)other_image;
-            const int32_t other_size = other_region_array->size;
-            // Find the block we are invalidating in the other region array
-            for (int32_t i = 0; i < other_size; i++) {
-                if (other_region_array->data[i].block == region->block) {
-                    // Do a shuffle remove. Order in the region array doesn't matter.
-                    other_region_array->data[i] = other_region_array->data[other_size - 1];
-                    other_region_array->size--;
-                    break;
-                }
-            }
-            RUBY_ASSERT(other_region_array->size < other_size);
-
-            invalidate_block_version(region->block);
+        rb_darray_foreach(array, i, elem) {
+            invalidate_block_version(elem->block);
         }

-        array->size = 0;
+        rb_darray_free(array);
     }

     RB_VM_LOCK_LEAVE();
 }

+// Remove a block from the method lookup dependency table
+static void
+remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block)
+{
+    st_data_t key = (st_data_t)cc_or_cme, image;
+    if (st_lookup(method_lookup_dependency, key, &image)) {
+        block_array_t array = (void *)image;
+        struct compiled_region *elem;
+
+        // Find the block we are removing
+        rb_darray_foreach(array, i, elem) {
+            if (elem->block == block) {
+                // Remove the current element by moving the last element here.
+                // Order in the region array doesn't matter.
+                *elem = rb_darray_get(array, rb_darray_size(array) - 1);
+                rb_darray_pop_back(array);
+                break;
+            }
+        }
+
+        if (rb_darray_size(array) == 0) {
+            st_delete(method_lookup_dependency, &key, NULL);
+            rb_darray_free(array);
+        }
+    }
+}
+
+void
+ujit_unlink_method_lookup_dependency(block_t *block)
+{
+    if (block->dependencies.cc) remove_method_lookup_dependency(block->dependencies.cc, block);
+    if (block->dependencies.cme) remove_method_lookup_dependency(block->dependencies.cme, block);
+}
+
 void
 rb_ujit_compile_iseq(const rb_iseq_t *iseq)
 {
@@ -336,19 +324,6 @@ struct ujit_block_itr {
     VALUE list;
 };

-static int
-iseqw_ujit_collect_blocks(st_data_t key, st_data_t value, st_data_t argp)
-{
-    block_t * block = (block_t *)value;
-    struct ujit_block_itr * itr = (struct ujit_block_itr *)argp;
-
-    if (block->blockid.iseq == itr->iseq) {
-        VALUE rb_block = TypedData_Wrap_Struct(cUjitBlock, &ujit_block_type, block);
-        rb_ary_push(itr->list, rb_block);
-    }
-    return ST_CONTINUE;
-}
-
 /* Get a list of the UJIT blocks associated with `rb_iseq` */
 static VALUE
 ujit_blocks_for(VALUE mod, VALUE rb_iseq)
@@ -356,15 +331,19 @@ ujit_blocks_for(VALUE mod, VALUE rb_iseq)
     if (CLASS_OF(rb_iseq) != rb_cISeq) {
         return rb_ary_new();
     }
+
     const rb_iseq_t *iseq = rb_iseqw_to_iseq(rb_iseq);
-    st_table * vt = (st_table *)version_tbl;
-    struct ujit_block_itr itr;
-    itr.iseq = iseq;
-    itr.list = rb_ary_new();
+    block_t **element;
+    VALUE all_versions = rb_ary_new();

-    rb_st_foreach(vt, iseqw_ujit_collect_blocks, (st_data_t)&itr);
+    rb_darray_foreach(iseq->body->ujit_blocks, idx, element) {
+        for (block_t *version = *element; version; version = version->next) {
+            VALUE rb_block = TypedData_Wrap_Struct(cUjitBlock, &ujit_block_type, version);
+            rb_ary_push(all_versions, rb_block);
+        }
+    }

-    return itr.list;
+    return all_versions;
 }

 static VALUE
@@ -571,6 +550,52 @@ print_ujit_stats(void)
 }
 #endif // if RUBY_DEBUG

+void
+rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body)
+{
+    block_t **element;
+    rb_darray_foreach(body->ujit_blocks, idx, element) {
+        for (block_t *block = *element; block; block = block->next) {
+            rb_gc_mark_movable((VALUE)block->blockid.iseq);
+
+            rb_gc_mark_movable(block->dependencies.cc);
+            rb_gc_mark_movable(block->dependencies.cme);
+            rb_gc_mark_movable(block->dependencies.iseq);
+        }
+    }
+}
+
+void
+rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body)
+{
+    block_t **element;
+    rb_darray_foreach(body->ujit_blocks, idx, element) {
+        for (block_t *block = *element; block; block = block->next) {
+            block->blockid.iseq = (const rb_iseq_t *)rb_gc_location((VALUE)block->blockid.iseq);
+
+            block->dependencies.cc = rb_gc_location(block->dependencies.cc);
+            block->dependencies.cme = rb_gc_location(block->dependencies.cme);
+            block->dependencies.iseq = rb_gc_location(block->dependencies.iseq);
+        }
+    }
+}
+
+void
+rb_ujit_iseq_free(const struct rb_iseq_constant_body *body)
+{
+    block_t **element;
+    rb_darray_foreach(body->ujit_blocks, idx, element) {
+        block_t *block = *element;
+        while (block) {
+            block_t *next = block->next;
+            ujit_free_block(block);
+            block = next;
+        }
+    }
+
+    rb_darray_free(body->ujit_blocks);
+}
+
 void
 rb_ujit_init(struct rb_ujit_options *options)
 {
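The method lookup dependency table above now maps each cc or cme to an rb_darray of struct compiled_region entries, and remove_method_lookup_dependency() drops an entry with a shuffle remove (the last element moves into the vacated slot). A standalone sketch of that pattern (hypothetical toy types in place of the real block_t; assumes darray.h), not part of the commit:

#include <stdio.h>
#include "darray.h"

struct toy_block { int id; };                        // stand-in for block_t
struct region { struct toy_block *block; };          // mirrors struct compiled_region
typedef rb_darray(struct region) region_array_t;     // mirrors block_array_t

// Remove `block` from `array` by moving the last element into its slot;
// the same shuffle remove that remove_method_lookup_dependency() performs.
static void
region_array_remove(region_array_t array, struct toy_block *block)
{
    struct region *elem;
    rb_darray_foreach(array, idx, elem) {
        if (elem->block == block) {
            *elem = rb_darray_get(array, rb_darray_size(array) - 1);
            rb_darray_pop_back(array);
            break;
        }
    }
}

int main(void)
{
    struct toy_block a = { 1 }, b = { 2 };
    region_array_t regions = NULL;

    if (!rb_darray_append(&regions, (struct region) { .block = &a })) return 1;
    if (!rb_darray_append(&regions, (struct region) { .block = &b })) return 1;

    region_array_remove(regions, &a);
    printf("remaining=%d first id=%d\n", (int)rb_darray_size(regions),
           rb_darray_ref(regions, 0)->block->id);    // remaining=1, id=2

    rb_darray_free(regions);
    return 0;
}

Order in the array does not matter to the table, which is what makes the constant-time shuffle remove safe here.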
ujit_iface.h

@@ -34,5 +34,6 @@ bool cfunc_needs_frame(const rb_method_cfunc_t *cfunc);
 void assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t* block);
 // this function *must* return passed exit_pc
 const VALUE *rb_ujit_count_side_exit_op(const VALUE *exit_pc);
+void ujit_unlink_method_lookup_dependency(block_t *block);

 #endif // #ifndef UJIT_IFACE_H
vm_core.h

@@ -77,6 +77,7 @@
 #include "ruby/st.h"
 #include "ruby_atomic.h"
 #include "vm_opts.h"
+#include "darray.h"

 #include "ruby/thread_native.h"
 #if defined(_WIN32)
@@ -311,6 +312,8 @@ pathobj_realpath(VALUE pathobj)
 /* Forward declarations */
 struct rb_mjit_unit;

+typedef rb_darray(struct ujit_block_version *) rb_ujit_block_array_t;
+
 struct rb_iseq_constant_body {
     enum iseq_type {
         ISEQ_TYPE_TOP,
@@ -449,6 +452,8 @@ struct rb_iseq_constant_body {
     long unsigned total_calls; /* number of total calls with `mjit_exec()` */
    struct rb_mjit_unit *jit_unit;
 #endif

+    rb_ujit_block_array_t ujit_blocks; // empty, or has a size equal to iseq_size
 };

 /* T_IMEMO/iseq */
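A small illustration (not from the commit) of how a darray of block pointers like rb_ujit_block_array_t can be pre-sized by appending NULL entries, mirroring how add_block_version() above grows body->ujit_blocks to iseq_size in the absence of a resize API; struct dummy_block is a placeholder for struct ujit_block_version:

#include <stddef.h>
#include "darray.h"

struct dummy_block;                                        // placeholder for struct ujit_block_version

typedef rb_darray(struct dummy_block *) block_array_t;     // same shape as rb_ujit_block_array_t

// Grow an empty array to n NULL slots, mirroring how add_block_version()
// sizes body->ujit_blocks to match body->iseq_size (there is no resize API yet).
static int
block_array_init(block_array_t *ary, unsigned n)
{
    while ((unsigned)rb_darray_size(*ary) < n) {
        if (!rb_darray_append(ary, NULL)) return 0;        // 0 signals allocation failure
    }
    return 1;
}

int main(void)
{
    block_array_t blocks = NULL;
    if (!block_array_init(&blocks, 16)) return 1;          // 16 slots, all NULL
    rb_darray_free(blocks);
    return 0;
}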