Finer-grained constant cache invalidation (take 2)

This commit reintroduces finer-grained constant cache invalidation.
The first attempt (8008fb7) was reverted after it was merged because
it caused issues on token-threaded builds (such as on Windows).

The issue was that when you're iterating through instruction sequences
and using the translator functions to get back the instruction structs,
you're using either `rb_vm_insn_null_translator` or
`rb_vm_insn_addr2insn2`, depending on whether it's a direct-threading
build. `rb_vm_insn_addr2insn2` does some normalization to always return
the non-trace version of whatever instruction you're looking at.
`rb_vm_insn_null_translator` does not do that normalization.

This means that an opcode comparison made while looping through the
instructions can give different results depending on the type of
threading in use, which is very confusing. So, this commit creates a
new translator function, `rb_vm_insn_normalizing_translator`, that
always returns the non-trace version, so that opcode comparisons don't
have to worry about different configurations. A sketch of the opcode
layout this relies on follows.

[Feature #18589]
Kevin Newton, 2022-03-31 11:04:25 -04:00 (committed by Alan Wu)
Parent: 20c190f95a
Commit: 6068da8937
17 changed files with 491 additions and 81 deletions

@@ -0,0 +1,22 @@
$VERBOSE = nil

CONSTANT1 = 1
CONSTANT2 = 1
CONSTANT3 = 1
CONSTANT4 = 1
CONSTANT5 = 1

def constants
  [CONSTANT1, CONSTANT2, CONSTANT3, CONSTANT4, CONSTANT5]
end

500_000.times do
  constants

  # With previous behavior, this would cause all of the constant caches
  # associated with the constant lookups listed above to invalidate, meaning
  # they would all have to be fetched again. With current behavior, it only
  # invalidates when a name matches, so the following constant set shouldn't
  # impact the constant lookups listed above.
  INVALIDATE = true
end
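
Not part of the committed file: if you want a rough wall-clock number for
the loop above, a hedged sketch using the stdlib Benchmark module (assumes
the definitions in this file):

    require "benchmark"

    elapsed = Benchmark.realtime do
      500_000.times do
        constants
        INVALIDATE = true # now only busts caches registered for :INVALIDATE
      end
    end

    puts format("%.2fs", elapsed)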

@@ -0,0 +1,187 @@
# Constant lookup is cached.
assert_equal '1', %q{
  CONST = 1

  def const
    CONST
  end

  const
  const
}

# Invalidate when a constant is set.
assert_equal '2', %q{
  CONST = 1

  def const
    CONST
  end

  const

  CONST = 2

  const
}

# Invalidate when a constant of the same name is set.
assert_equal '1', %q{
  CONST = 1

  def const
    CONST
  end

  const

  class Container
    CONST = 2
  end

  const
}

# Invalidate when a constant is removed.
assert_equal 'missing', %q{
  class Container
    CONST = 1

    def const
      CONST
    end

    def self.const_missing(name)
      'missing'
    end

    new.const
    remove_const :CONST
  end

  Container.new.const
}

# Invalidate when a constant's visibility changes.
assert_equal 'missing', %q{
  class Container
    CONST = 1

    def self.const_missing(name)
      'missing'
    end
  end

  def const
    Container::CONST
  end

  const

  Container.private_constant :CONST

  const
}

# Invalidate when a constant's visibility changes even if the call to the
# visibility change method fails.
assert_equal 'missing', %q{
  class Container
    CONST1 = 1

    def self.const_missing(name)
      'missing'
    end
  end

  def const1
    Container::CONST1
  end

  const1

  begin
    Container.private_constant :CONST1, :CONST2
  rescue NameError
  end

  const1
}

# Invalidate when a module is included.
assert_equal 'INCLUDE', %q{
  module Include
    CONST = :INCLUDE
  end

  class Parent
    CONST = :PARENT
  end

  class Child < Parent
    def const
      CONST
    end

    new.const

    include Include
  end

  Child.new.const
}

# Invalidate when const_missing is hit.
assert_equal '2', %q{
  module Container
    Foo = 1
    Bar = 2

    class << self
      attr_accessor :count

      def const_missing(name)
        @count += 1
        @count == 1 ? Foo : Bar
      end
    end

    @count = 0
  end

  def const
    Container::Baz
  end

  const
  const
}

# Invalidate when the iseq gets cleaned up.
assert_equal '2', %q{
  CONSTANT = 1

  iseq = RubyVM::InstructionSequence.compile(<<~RUBY)
    CONSTANT
  RUBY

  iseq.eval
  iseq = nil
  GC.start

  CONSTANT = 2
}

# Invalidate when the iseq gets cleaned up even if it was never in the cache.
assert_equal '2', %q{
  CONSTANT = 1

  iseq = RubyVM::InstructionSequence.compile(<<~RUBY)
    CONSTANT
  RUBY

  iseq = nil
  GC.start

  CONSTANT = 2
}

class.c (16 changes)

@@ -1169,11 +1169,20 @@ module_in_super_chain(const VALUE klass, VALUE module)
     return false;
 }
 
+// For each ID key in the class constant table, we're going to clear the VM's
+// inline constant caches associated with it.
+static enum rb_id_table_iterator_result
+clear_constant_cache_i(ID id, VALUE value, void *data)
+{
+    rb_clear_constant_cache_for_id(id);
+    return ID_TABLE_CONTINUE;
+}
+
 static int
 do_include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super, bool check_cyclic)
 {
     VALUE p, iclass, origin_stack = 0;
-    int method_changed = 0, constant_changed = 0, add_subclass;
+    int method_changed = 0, add_subclass;
     long origin_len;
     VALUE klass_origin = RCLASS_ORIGIN(klass);
     VALUE original_klass = klass;
@@ -1266,13 +1275,12 @@ do_include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super
         }
 
         tbl = RCLASS_CONST_TBL(module);
-        if (tbl && rb_id_table_size(tbl)) constant_changed = 1;
+        if (tbl && rb_id_table_size(tbl))
+            rb_id_table_foreach(tbl, clear_constant_cache_i, NULL);
   skip:
         module = RCLASS_SUPER(module);
     }
 
-    if (constant_changed) rb_clear_constant_cache();
     return method_changed;
 }

@@ -252,6 +252,13 @@ void rb_undef_alloc_func(VALUE klass);
  */
 rb_alloc_func_t rb_get_alloc_func(VALUE klass);
 
+/**
+ * Clears the inline constant caches associated with a particular ID. Extension
+ * libraries should not bother with such things. Just forget about this API (or
+ * even, the presence of constant caches).
+ */
+void rb_clear_constant_cache_for_id(ID id);
+
 /**
  * Resembles `alias`.
  *

@@ -1028,12 +1028,23 @@ opt_getinlinecache
 (VALUE val)
 {
     struct iseq_inline_constant_cache_entry *ice = ic->entry;
 
+    // If there isn't an entry, then we're going to walk through the ISEQ
+    // starting at this instruction until we get to the associated
+    // opt_setinlinecache and associate this inline cache with every getconstant
+    // listed in between. We're doing this here instead of when the instructions
+    // are first compiled because it's possible to turn off inline caches and we
+    // want this to work in either case.
+    if (!ice) {
+        vm_ic_compile(GET_CFP(), ic);
+    }
+
     if (ice && vm_ic_hit_p(ice, GET_EP())) {
         val = ice->value;
         JUMP(dst);
     }
     else {
         val = Qnil;
     }
 }

@@ -47,7 +47,6 @@ VALUE rb_obj_is_thread(VALUE obj);
void rb_vm_mark(void *ptr);
void rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx);
PUREFUNC(VALUE rb_vm_top_self(void));
void rb_vm_inc_const_missing_count(void);
const void **rb_vm_get_insns_address_table(void);
VALUE rb_source_location(int *pline);
const char *rb_source_location_cstr(int *pline);

iseq.c (114 changes)

@@ -102,12 +102,77 @@ compile_data_free(struct iseq_compile_data *compile_data)
     }
 }
 
+struct iseq_clear_ic_references_data {
+    IC ic;
+};
+
+// This iterator is used to walk through the instructions and clean any
+// references to ICs that are contained within this ISEQ out of the VM's
+// constant cache table. It passes around a struct that holds the current IC
+// we're looking for, which can be NULL (if we haven't hit an opt_getinlinecache
+// instruction yet) or set to an IC (if we've hit an opt_getinlinecache and
+// haven't yet hit the associated opt_setinlinecache).
+static bool
+iseq_clear_ic_references_i(VALUE *code, VALUE insn, size_t index, void *data)
+{
+    struct iseq_clear_ic_references_data *ic_data = (struct iseq_clear_ic_references_data *) data;
+
+    switch (insn) {
+      case BIN(opt_getinlinecache): {
+        RUBY_ASSERT_ALWAYS(ic_data->ic == NULL);
+        ic_data->ic = (IC) code[index + 2];
+        return true;
+      }
+      case BIN(getconstant): {
+        if (ic_data->ic != NULL) {
+            ID id = (ID) code[index + 1];
+            rb_vm_t *vm = GET_VM();
+            VALUE lookup_result;
+
+            if (rb_id_table_lookup(vm->constant_cache, id, &lookup_result)) {
+                st_table *ics = (st_table *) lookup_result;
+                st_data_t ic = (st_data_t) ic_data->ic;
+                st_delete(ics, &ic, NULL);
+
+                if (ics->num_entries == 0) {
+                    rb_id_table_delete(vm->constant_cache, id);
+                    st_free_table(ics);
+                }
+            }
+        }
+        return true;
+      }
+      case BIN(opt_setinlinecache): {
+        RUBY_ASSERT_ALWAYS(ic_data->ic != NULL);
+        ic_data->ic = NULL;
+        return true;
+      }
+      default:
+        return true;
+    }
+}
+
+// When an ISEQ is being freed, all of its associated ICs are going to go away
+// as well. Because of this, we need to walk through the ISEQ, find any
+// opt_getinlinecache calls, and clear out the VM's constant cache of associated
+// ICs.
+static void
+iseq_clear_ic_references(const rb_iseq_t *iseq)
+{
+    struct iseq_clear_ic_references_data data = { .ic = NULL };
+    rb_iseq_each(iseq, 0, iseq_clear_ic_references_i, (void *) &data);
+}
+
 void
 rb_iseq_free(const rb_iseq_t *iseq)
 {
     RUBY_FREE_ENTER("iseq");
 
     if (iseq && ISEQ_BODY(iseq)) {
+        iseq_clear_ic_references(iseq);
         struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
         mjit_free_iseq(iseq); /* Notify MJIT */
         rb_yjit_iseq_free(body);
@@ -157,6 +222,22 @@ rb_vm_insn_null_translator(const void *addr)
     return (VALUE)addr;
 }
 
+// The translator for OPT_DIRECT_THREADED_CODE and OPT_CALL_THREADED_CODE does
+// some normalization to always return the non-trace version of instructions.
+// To mirror that behavior in token-threaded environments, we normalize in this
+// translator by also returning non-trace opcodes.
+static VALUE
+rb_vm_insn_normalizing_translator(const void *addr)
+{
+    VALUE opcode = (VALUE)addr;
+    VALUE trace_opcode_threshold = (VM_INSTRUCTION_SIZE / 2);
+
+    if (opcode >= trace_opcode_threshold) {
+        return opcode - trace_opcode_threshold;
+    }
+
+    return opcode;
+}
+
 typedef VALUE iseq_value_itr_t(void *ctx, VALUE obj);
 typedef VALUE rb_vm_insns_translator_t(const void *addr);
@@ -250,6 +331,39 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
     }
 }
 
+// Similar to rb_iseq_each_value, except that this walks through each
+// instruction instead of the associated VALUEs. The provided iterator should
+// return a boolean that indicates whether or not to continue iterating.
+void
+rb_iseq_each(const rb_iseq_t *iseq, size_t start_index, rb_iseq_each_i iterator, void *data)
+{
+    unsigned int size;
+    VALUE *code;
+    size_t index;
+
+    rb_vm_insns_translator_t *const translator =
+#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
+        (FL_TEST((VALUE)iseq, ISEQ_TRANSLATED)) ? rb_vm_insn_addr2insn2 :
+#endif
+        rb_vm_insn_normalizing_translator; // Always pass non-trace opcodes.
+
+    const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
+
+    size = body->iseq_size;
+    code = body->iseq_encoded;
+
+    for (index = start_index; index < size;) {
+        void *addr = (void *) code[index];
+        VALUE insn = translator(addr);
+
+        if (!iterator(code, insn, index, data)) {
+            break;
+        }
+
+        index += insn_len(insn);
+    }
+}
+
 static VALUE
 update_each_insn_value(void *ctx, VALUE obj)
 {

iseq.h (3 changes)

@@ -182,6 +182,9 @@ void rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc,
 void rb_iseq_mark_insn_storage(struct iseq_compile_data_storage *arena);
 
 /* iseq.c */
+typedef bool rb_iseq_each_i(VALUE *code, VALUE insn, size_t index, void *data);
+void rb_iseq_each(const rb_iseq_t *iseq, size_t start_index, rb_iseq_each_i iterator, void *data);
+
 VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt);
 VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc);
 unsigned int rb_iseq_line_no(const rb_iseq_t *iseq, size_t pos);

@@ -4,11 +4,11 @@ require 'test/unit'
 class TestRubyVM < Test::Unit::TestCase
   def test_stat
     assert_kind_of Hash, RubyVM.stat
-    assert_kind_of Integer, RubyVM.stat[:global_constant_state]
+    assert_kind_of Integer, RubyVM.stat[:class_serial]
 
     RubyVM.stat(stat = {})
     assert_not_empty stat
-    assert_equal stat[:global_constant_state], RubyVM.stat(:global_constant_state)
+    assert_equal stat[:class_serial], RubyVM.stat(:class_serial)
   end
 
   def test_stat_unknown

@@ -13,9 +13,9 @@
 % # compiler: Capture IC values, locking getinlinecache
     struct iseq_inline_constant_cache_entry *ice = ic->entry;
-    if (ice != NULL && GET_IC_SERIAL(ice) && !status->compile_info->disable_const_cache) {
+    if (ice != NULL && !status->compile_info->disable_const_cache) {
 % # JIT: Inline everything in IC, and cancel the slow path
-        fprintf(f, " if (vm_inlined_ic_hit_p(0x%"PRIxVALUE", 0x%"PRIxVALUE", (const rb_cref_t *)0x%"PRIxVALUE", %"PRI_SERIALT_PREFIX"u, reg_cfp->ep)) {", ice->flags, ice->value, (VALUE)ice->ic_cref, GET_IC_SERIAL(ice));
+        fprintf(f, " if (vm_inlined_ic_hit_p(0x%"PRIxVALUE", 0x%"PRIxVALUE", (const rb_cref_t *)0x%"PRIxVALUE", reg_cfp->ep)) {", ice->flags, ice->value, (VALUE)ice->ic_cref);
         fprintf(f, " stack[%d] = 0x%"PRIxVALUE";\n", b->stack_size, ice->value);
         fprintf(f, " goto label_%d;\n", pos + insn_len(insn) + (int)dst);
         fprintf(f, " }");

@@ -2848,7 +2848,7 @@ rb_const_remove(VALUE mod, ID id)
         undefined_constant(mod, ID2SYM(id));
     }
 
-    rb_clear_constant_cache();
+    rb_clear_constant_cache_for_id(id);
 
     val = ce->value;
 
     if (val == Qundef) {
@@ -3132,7 +3132,7 @@ rb_const_set(VALUE klass, ID id, VALUE val)
     struct rb_id_table *tbl = RCLASS_CONST_TBL(klass);
     if (!tbl) {
         RCLASS_CONST_TBL(klass) = tbl = rb_id_table_create(0);
-        rb_clear_constant_cache();
+        rb_clear_constant_cache_for_id(id);
         ce = ZALLOC(rb_const_entry_t);
         rb_id_table_insert(tbl, id, (VALUE)ce);
         setup_const_entry(ce, klass, val, CONST_PUBLIC);
@@ -3210,7 +3210,7 @@ const_tbl_update(struct autoload_const *ac)
     struct autoload_data_i *ele = current_autoload_data(klass, id, &ac);
 
     if (ele) {
-        rb_clear_constant_cache();
+        rb_clear_constant_cache_for_id(id);
 
         ac->value = val; /* autoload_i is non-WB-protected */
         ac->file = rb_source_location(&ac->line);
@@ -3238,11 +3238,11 @@ const_tbl_update(struct autoload_const *ac)
"previous definition of %"PRIsVALUE" was here", name);
}
}
rb_clear_constant_cache();
rb_clear_constant_cache_for_id(id);
setup_const_entry(ce, klass, val, visibility);
}
else {
rb_clear_constant_cache();
rb_clear_constant_cache_for_id(id);
ce = ZALLOC(rb_const_entry_t);
rb_id_table_insert(tbl, id, (VALUE)ce);
@@ -3297,10 +3297,6 @@ set_const_visibility(VALUE mod, int argc, const VALUE *argv,
         VALUE val = argv[i];
         id = rb_check_id(&val);
         if (!id) {
-            if (i > 0) {
-                rb_clear_constant_cache();
-            }
-
             undefined_constant(mod, val);
         }
         if ((ce = rb_const_lookup(mod, id))) {
@@ -3315,15 +3311,12 @@ set_const_visibility(VALUE mod, int argc, const VALUE *argv,
ac->flag |= flag;
}
}
rb_clear_constant_cache_for_id(id);
}
else {
if (i > 0) {
rb_clear_constant_cache();
}
undefined_constant(mod, ID2SYM(id));
}
}
rb_clear_constant_cache();
}
void

vm.c (61 changes)

@@ -496,6 +496,16 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
     return FALSE;
 }
 
+// Iterator function to loop through each entry in the constant cache and add
+// its associated size into the given Hash.
+static enum rb_id_table_iterator_result
+vm_stat_constant_cache_i(ID id, VALUE table, void *constant_cache)
+{
+    st_index_t size = ((st_table *) table)->num_entries;
+    rb_hash_aset((VALUE) constant_cache, ID2SYM(id), LONG2NUM(size));
+    return ID_TABLE_CONTINUE;
+}
+
 /*
  * call-seq:
  *    RubyVM.stat -> Hash
/*
* call-seq:
* RubyVM.stat -> Hash
@@ -504,10 +514,10 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
  *
  * Returns a Hash containing implementation-dependent counters inside the VM.
  *
- * This hash includes information about method/constant cache serials:
+ * This hash includes information about method/constant caches:
  *
  *     {
- *       :global_constant_state=>481,
+ *       :constant_cache=>{:RubyVM=>3},
  *       :class_serial=>9029
  *     }
  *
@@ -516,11 +526,10 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
  *
  * This method is only expected to work on C Ruby.
  */
 static VALUE
 vm_stat(int argc, VALUE *argv, VALUE self)
 {
-    static VALUE sym_global_constant_state, sym_class_serial, sym_global_cvar_state;
+    static VALUE sym_constant_cache, sym_class_serial, sym_global_cvar_state;
     VALUE arg = Qnil;
     VALUE hash = Qnil, key = Qnil;
@@ -537,13 +546,11 @@ vm_stat(int argc, VALUE *argv, VALUE self)
         hash = rb_hash_new();
     }
 
-    if (sym_global_constant_state == 0) {
+    if (sym_constant_cache == 0) {
 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
-        S(global_constant_state);
+        S(constant_cache);
         S(class_serial);
         S(global_cvar_state);
 #undef S
     }
 
 #define SET(name, attr) \
     if (key == sym_##name) \
@@ -551,11 +558,25 @@ vm_stat(int argc, VALUE *argv, VALUE self)
     else if (hash != Qnil) \
         rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
 
-    SET(global_constant_state, ruby_vm_global_constant_state);
     SET(class_serial, ruby_vm_class_serial);
     SET(global_cvar_state, ruby_vm_global_cvar_state);
 #undef SET
 
+    // Here we're going to set up the constant cache hash that has key-value
+    // pairs of { name => count }, where name is a Symbol that represents the
+    // ID in the cache and count is an Integer representing the number of inline
+    // constant caches associated with that Symbol.
+    if (key == sym_constant_cache || hash != Qnil) {
+        VALUE constant_cache = rb_hash_new();
+        rb_id_table_foreach(GET_VM()->constant_cache, vm_stat_constant_cache_i, (void *) constant_cache);
+
+        if (key == sym_constant_cache) {
+            return constant_cache;
+        } else {
+            rb_hash_aset(hash, sym_constant_cache, constant_cache);
+        }
+    }
+
     if (!NIL_P(key)) { /* matched key should return above */
         rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
     }
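
For reference, a hedged sketch of what the reworked stat output looks like
from Ruby (counts are illustrative and depend on what has been cached so far):

    # The new :constant_cache key maps constant names to the number of inline
    # caches currently registered for them.
    p RubyVM.stat[:constant_cache] # e.g. {:RubyVM=>3}

    # Passing the key returns just the nested hash.
    p RubyVM.stat(:constant_cache) # e.g. {:RubyVM=>3}

    # :global_constant_state is gone; :class_serial is still present.
    p RubyVM.stat(:class_serial)   # e.g. 9029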
@@ -2818,6 +2839,26 @@ size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
+static enum rb_id_table_iterator_result
+vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
+{
+    *((size_t *) size) += rb_st_memsize((st_table *) ics);
+    return ID_TABLE_CONTINUE;
+}
+
+// Returns a size_t representing the memory footprint of the VM's constant
+// cache, which is the memsize of the table as well as the memsize of all of the
+// nested tables.
+static size_t
+vm_memsize_constant_cache(void)
+{
+    rb_vm_t *vm = GET_VM();
+    size_t size = rb_id_table_memsize(vm->constant_cache);
+
+    rb_id_table_foreach(vm->constant_cache, vm_memsize_constant_cache_i, &size);
+    return size;
+}
+
+// Used for VM memsize reporting. Returns the size of the at_exit list by
+// looping through the linked list and adding up the size of the structs.
 static size_t
 vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
 {
@@ -2861,7 +2902,8 @@ vm_memsize(const void *ptr)
rb_st_memsize(vm->frozen_strings) +
vm_memsize_builtin_function_table(vm->builtin_function_table) +
rb_id_table_memsize(vm->negative_cme_table) +
rb_st_memsize(vm->overloaded_cme_table)
rb_st_memsize(vm->overloaded_cme_table) +
vm_memsize_constant_cache()
);
// TODO
@@ -3935,6 +3977,7 @@ Init_BareVM(void)
     ruby_current_vm_ptr = vm;
     vm->negative_cme_table = rb_id_table_create(16);
     vm->overloaded_cme_table = st_init_numtable();
+    vm->constant_cache = rb_id_table_create(0);
 
     Init_native_thread(th);
     th->vm = vm;

@@ -229,44 +229,14 @@ struct iseq_inline_constant_cache_entry {
     VALUE flags;
 
     VALUE value;              // v0
-    union ic_serial_entry ic_serial; // v1, v2
+    VALUE _unused1;           // v1
+    VALUE _unused2;           // v2
     const rb_cref_t *ic_cref; // v3
 };
 STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
               (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
                sizeof(const rb_cref_t *)) <= sizeof(struct RObject));
 
-#if SIZEOF_SERIAL_T <= SIZEOF_VALUE
-
-#define GET_IC_SERIAL(ice) (ice)->ic_serial.raw
-#define SET_IC_SERIAL(ice, v) (ice)->ic_serial.raw = (v)
-
-#else
-
-static inline rb_serial_t
-get_ic_serial(const struct iseq_inline_constant_cache_entry *ice)
-{
-    union ic_serial_entry tmp;
-    tmp.data[0] = ice->ic_serial.data[0];
-    tmp.data[1] = ice->ic_serial.data[1];
-    return tmp.raw;
-}
-
-#define GET_IC_SERIAL(ice) get_ic_serial(ice)
-
-static inline void
-set_ic_serial(struct iseq_inline_constant_cache_entry *ice, rb_serial_t v)
-{
-    union ic_serial_entry tmp;
-    tmp.raw = v;
-    ice->ic_serial.data[0] = tmp.data[0];
-    ice->ic_serial.data[1] = tmp.data[1];
-}
-
-#define SET_IC_SERIAL(ice, v) set_ic_serial((ice), (v))
-
-#endif
-
 struct iseq_inline_constant_cache {
     struct iseq_inline_constant_cache_entry *entry;
 
     // For YJIT: the index to the opt_getinlinecache instruction in the same iseq.
@@ -722,6 +692,12 @@ typedef struct rb_vm_struct {
     struct rb_id_table *negative_cme_table;
     st_table *overloaded_cme_table; // cme -> overloaded_cme
 
+    // This id table contains a mapping from ID to ICs. It does this with ID
+    // keys and nested st_tables as values. The nested tables have ICs as keys
+    // and Qtrue as values. It is used when inline constant caches need to be
+    // invalidated or ISEQs are being freed.
+    struct rb_id_table *constant_cache;
+
 #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
 #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
 #endif
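
As a mental model, here is a small Ruby sketch of that bookkeeping (the
InlineCache struct is hypothetical; the real table is an rb_id_table whose
values are st_tables keyed by IC pointers):

    InlineCache = Struct.new(:entry)

    # ID -> { inline cache => true }, mirroring vm->constant_cache.
    constant_cache = Hash.new { |hash, id| hash[id] = {} }

    # Registration, in the spirit of vm_ic_compile_i: tie a cache to the ID
    # of each getconstant it covers.
    cache = InlineCache.new(:filled)
    constant_cache[:CONST][cache] = true

    # Invalidation, in the spirit of rb_clear_constant_cache_for_id: clear
    # only the caches registered for that one ID.
    constant_cache[:CONST].each_key { |ic| ic.entry = nil }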

@@ -4926,13 +4926,47 @@ vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
 #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
 
+// For each getconstant, associate the ID that corresponds to the first operand
+// to that instruction with the inline cache.
+static bool
+vm_ic_compile_i(VALUE *code, VALUE insn, size_t index, void *ic)
+{
+    if (insn == BIN(opt_setinlinecache)) {
+        return false;
+    }
+
+    if (insn == BIN(getconstant)) {
+        ID id = code[index + 1];
+        rb_vm_t *vm = GET_VM();
+
+        st_table *ics;
+        if (!rb_id_table_lookup(vm->constant_cache, id, (VALUE *) &ics)) {
+            ics = st_init_numtable();
+            rb_id_table_insert(vm->constant_cache, id, (VALUE) ics);
+        }
+
+        st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
+    }
+
+    return true;
+}
+
+// Loop through the instruction sequences starting at the opt_getinlinecache
+// call and gather up every getconstant's ID. Associate that with the VM's
+// constant cache so that whenever one of the constants changes the inline cache
+// will get busted.
+static void
+vm_ic_compile(rb_control_frame_t *cfp, IC ic)
+{
+    const rb_iseq_t *iseq = cfp->iseq;
+    rb_iseq_each(iseq, cfp->pc - ISEQ_BODY(iseq)->iseq_encoded, vm_ic_compile_i, (void *) ic);
+}
+
 // For MJIT inlining
 static inline bool
-vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, rb_serial_t ic_serial, const VALUE *reg_ep)
+vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
 {
-    if (ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
-        ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
+    if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
         VM_ASSERT((flags & IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(value) : true);
 
         return (ic_cref == NULL || // no need to check CREF
@@ -4945,7 +4979,7 @@ static bool
 vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
 {
     VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
-    return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, GET_IC_SERIAL(ice), reg_ep);
+    return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
 }
 
 // YJIT needs this function to never allocate and never raise
@@ -4958,13 +4992,16 @@ rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
 static void
 vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
 {
+    if (ruby_vm_const_missing_count > 0) {
+        ruby_vm_const_missing_count = 0;
+        ic->entry = NULL;
+        return;
+    }
+
     struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
     RB_OBJ_WRITE(ice, &ice->value, val);
     ice->ic_cref = vm_get_const_key_cref(reg_ep);
-    SET_IC_SERIAL(ice, GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count);
     if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
-    ruby_vm_const_missing_count = 0;
     RB_OBJ_WRITE(iseq, &ic->entry, ice);
 
 #ifndef MJIT_HEADER
     // MJIT and YJIT can't be on at the same time, so there is no need to

@@ -14,7 +14,6 @@
 MJIT_SYMBOL_EXPORT_BEGIN
 
 RUBY_EXTERN VALUE ruby_vm_const_missing_count;
-RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
 RUBY_EXTERN rb_serial_t ruby_vm_class_serial;
 RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
@@ -183,8 +182,6 @@ CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enable
 #define PREV_CLASS_SERIAL() (ruby_vm_class_serial)
 #define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial)
-#define GET_GLOBAL_CONSTANT_STATE() (ruby_vm_global_constant_state)
-#define INC_GLOBAL_CONSTANT_STATE() (++ruby_vm_global_constant_state)
 #define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
 #define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)

@@ -126,11 +126,27 @@ vm_cme_invalidate(rb_callable_method_entry_t *cme)
     rb_yjit_cme_invalidate((VALUE)cme);
 }
 
-void
-rb_clear_constant_cache(void)
-{
-    rb_yjit_constant_state_changed();
-    INC_GLOBAL_CONSTANT_STATE();
-}
+static int
+rb_clear_constant_cache_for_id_i(st_data_t ic, st_data_t idx, st_data_t arg)
+{
+    ((IC) ic)->entry = NULL;
+    return ST_CONTINUE;
+}
+
+// Here for backward compat.
+void rb_clear_constant_cache(void) {}
+
+void
+rb_clear_constant_cache_for_id(ID id)
+{
+    rb_vm_t *vm = GET_VM();
+    st_table *ics;
+
+    if (rb_id_table_lookup(vm->constant_cache, id, (VALUE *) &ics)) {
+        st_foreach(ics, rb_clear_constant_cache_for_id_i, (st_data_t) NULL);
+    }
+
+    rb_yjit_constant_state_changed();
+}
 
 static void

@@ -4438,8 +4438,6 @@ gen_leave(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
     return YJIT_END_BLOCK;
 }
 
-RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
-
 static codegen_status_t
 gen_getglobal(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
@@ -4707,8 +4705,7 @@ gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
     // See vm_ic_hit_p(). The same conditions are checked in yjit_constant_ic_update().
     struct iseq_inline_constant_cache_entry *ice = ic->entry;
-    if (!ice || // cache not filled
-        GET_IC_SERIAL(ice) != ruby_vm_global_constant_state /* cache out of date */) {
+    if (!ice) {
         // In these cases, leave a block that unconditionally side exits
         // for the interpreter to invalidate.
         return YJIT_CANT_COMPILE;