Mirror of https://github.com/github/ruby.git
Make inline cache reads / writes atomic with object shapes
Prior to this commit, we were reading and writing the ivar index and the shape ID in inline caches in two separate instructions when getting and setting ivars. This meant there was a race condition between ractors and these caches: one ractor could change a value in the cache while another ractor was still reading from it.

This commit instead reads and writes the shape ID and the ivar index in inline caches atomically, as a single word, so the race condition no longer exists.

Co-Authored-By: Aaron Patterson <tenderlove@ruby-lang.org>
Co-Authored-By: John Hawthorn <john@hawthorn.email>
This commit is contained in:
Parent: ad63b668e2
Commit: 913979bede
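The substance of the fix is easiest to see outside the VM. The sketch below is an editorial illustration, not code from this commit: the struct and function names are invented, and the 32/32 bit split is assumed (the real split derives from SHAPE_BITS in shape.h, and the real fields appear in the vm_core.h and vm_callinfo.h hunks further down). It shows why packing both cache halves into one machine word closes the race, assuming a 64-bit uintptr_t whose aligned loads and stores are not torn:

#include <stdint.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

/* Assumed split: shape ID in the upper 32 bits, index in the lower 32. */
#define SHAPE_FLAG_SHIFT 32
#define SHAPE_FLAG_MASK  (((uintptr_t)1 << SHAPE_FLAG_SHIFT) - 1)

/* Before: two fields. A reader performing two loads can observe a new
 * shape ID paired with a stale index if another ractor writes the cache
 * between those loads. */
struct iv_cache_split {
    shape_id_t   shape_id;
    attr_index_t attr_index;
};

/* After: one word, so the pair is written and read with single accesses. */
struct iv_cache_packed {
    uintptr_t value; /* shape ID in upper bits, index + 1 in lower bits */
};

static void
cache_store(struct iv_cache_packed *c, shape_id_t shape_id, attr_index_t index)
{
    /* One aligned word store publishes both halves together. */
    c->value = ((uintptr_t)shape_id << SHAPE_FLAG_SHIFT) | (index + 1);
}

static void
cache_load(const struct iv_cache_packed *c, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t v = c->value; /* one aligned word load, never a mixed pair */
    *shape_id = (shape_id_t)(v >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(v & SHAPE_FLAG_MASK) - 1; /* low half 0 decodes to "not set" */
}

Note that this only rules out torn or mixed reads of the two halves; it adds no memory ordering, which matches what the diffs below rely on.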
@@ -351,20 +351,27 @@ module RubyVM::MJIT
     # _mjit_compile_ivar.erb
     def compile_ivar(insn_name, stack_size, pos, status, operands, body)
       ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(operands[1]) - body.is_entries)).iv_cache
+      dest_shape_id = ic_copy.value >> C.SHAPE_FLAG_SHIFT
+      attr_index = ic_copy.value & ((1 << C.SHAPE_FLAG_SHIFT) - 1)
+      source_shape_id = if dest_shape_id == C.INVALID_SHAPE_ID
+        dest_shape_id
+      else
+        RubyVM::Shape.find_by_id(dest_shape_id).parent_id
+      end

       src = +''
-      if !status.compile_info.disable_ivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
+      if !status.compile_info.disable_ivar_cache && source_shape_id != C.INVALID_SHAPE_ID
         # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
         # compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)

         # JIT: prepare vm_getivar/vm_setivar arguments and variables
         src << "{\n"
         src << "    VALUE obj = GET_SELF();\n"
-        src << "    const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
         # JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
         if insn_name == :setinstancevariable
-          src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
-          src << "    const shape_id_t dest_shape_id = (rb_serial_t)#{ic_copy.dest_shape_id};\n"
+          src << "    const shape_id_t source_shape_id = (shape_id_t)#{source_shape_id};\n"
+          src << "    const uint32_t index = #{attr_index - 1};\n"
+          src << "    const shape_id_t dest_shape_id = (shape_id_t)#{dest_shape_id};\n"
           src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj) && \n"
           src << "        dest_shape_id != ROBJECT_SHAPE_ID(obj)) {\n"
           src << "        if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {\n"

@@ -374,14 +381,19 @@ module RubyVM::MJIT
           src << "        VALUE *ptr = ROBJECT_IVPTR(obj);\n"
           src << "        RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
           src << "    }\n"
           src << "    else if (dest_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
           src << "        VALUE *ptr = ROBJECT_IVPTR(obj);\n"
           src << "        RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
           src << "    }\n"
         else
-          if ic_copy.attr_index == 0 # cache hit, but uninitialized iv
+          src << "    const shape_id_t source_shape_id = (shape_id_t)#{dest_shape_id};\n"
+          if attr_index == 0 # cache hit, but uninitialized iv
             src << "    /* Uninitialized instance variable */\n"
             src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
             src << "        stack[#{stack_size}] = Qnil;\n"
             src << "    }\n"
           else
-            src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
+            src << "    const uint32_t index = #{attr_index - 1};\n"
             src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
             src << "        stack[#{stack_size}] = ROBJECT_IVPTR(obj)[index];\n"
             src << "    }\n"

@@ -394,15 +406,15 @@ module RubyVM::MJIT
         src << "    }\n"
         src << "}\n"
         return src
-      elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
+      elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && source_shape_id != C.INVALID_SHAPE_ID
         # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
         # compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)

         # JIT: prepare vm_getivar's arguments and variables
         src << "{\n"
         src << "    VALUE obj = GET_SELF();\n"
-        src << "    const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
-        src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
+        src << "    const shape_id_t source_shape_id = (shape_id_t)#{dest_shape_id};\n"
+        src << "    const uint32_t index = #{attr_index - 1};\n"
        # JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
         src << "    struct gen_ivtbl *ivtbl;\n"
         src << "    if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && source_shape_id == rb_shape_get_shape_id(obj) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl))) {\n"
mjit_c.rb (11 lines changed)

@@ -9,6 +9,10 @@ module RubyVM::MJIT
     RubyVM::Shape::SHAPE_BITS
   end

+  def SHAPE_FLAG_SHIFT
+    RubyVM::Shape::SHAPE_FLAG_SHIFT
+  end
+
   def ROBJECT_EMBED_LEN_MAX
     Primitive.cexpr! 'INT2NUM(RBIMPL_EMBED_LEN_MAX_OF(VALUE))'
   end

@@ -255,9 +259,7 @@ module RubyVM::MJIT
   def C.iseq_inline_iv_cache_entry
     @iseq_inline_iv_cache_entry ||= CType::Struct.new(
       "iseq_inline_iv_cache_entry", Primitive.cexpr!("SIZEOF(struct iseq_inline_iv_cache_entry)"),
-      source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), source_shape_id)")],
-      dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), dest_shape_id)")],
-      attr_index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), attr_index)")],
+      value: [CType::Immediate.parse("uintptr_t"), Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), value)")],
     )
   end

@@ -332,8 +334,7 @@ module RubyVM::MJIT
       "", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_)"),
       attr: CType::Struct.new(
         "", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_.attr)"),
-        index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, index)")],
-        dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, dest_shape_id)")],
+        value: [CType::Immediate.parse("uintptr_t"), Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, value)")],
       ),
       method_missing_reason: self.method_missing_reason,
       v: self.VALUE,
object.c (2 lines changed)

@@ -319,7 +319,7 @@ init_copy(VALUE dest, VALUE obj)
         // If the object is frozen, the "dup"'d object will *not* be frozen,
         // so we need to copy the frozen shape's parent to the new object.
         if (rb_shape_frozen_shape_p(shape_to_set)) {
-            shape_to_set = shape_to_set->parent;
+            shape_to_set = rb_shape_get_shape_by_id(shape_to_set->parent_id);
         }

         // shape ids are different
shape.c (80 lines changed)

@@ -91,7 +91,7 @@ rb_shape_get_shape(VALUE obj)

 static rb_shape_t *
 rb_shape_lookup_id(rb_shape_t* shape, ID id, enum shape_type shape_type) {
-    while (shape->parent) {
+    while (shape->parent_id != INVALID_SHAPE_ID) {
         if (shape->edge_name == id) {
             // If the shape type is different, we don't
             // want this to count as a "found" ID

@@ -102,7 +102,7 @@ rb_shape_lookup_id(rb_shape_t* shape, ID id, enum shape_type shape_type) {
                 return NULL;
             }
         }
-        shape = shape->parent;
+        shape = rb_shape_get_shape_by_id(shape->parent_id);
     }
     return NULL;
 }

@@ -129,7 +129,7 @@ get_next_shape_internal(rb_shape_t* shape, ID id, VALUE obj, enum shape_type sha
             // In this case, the shape exists, but the shape is garbage, so we need to recreate it
             if (res) {
                 rb_id_table_delete(shape->edges, id);
-                res->parent = NULL;
+                res->parent_id = INVALID_SHAPE_ID;
             }

             rb_shape_t * new_shape = rb_shape_alloc(id, shape);

@@ -138,7 +138,7 @@ get_next_shape_internal(rb_shape_t* shape, ID id, VALUE obj, enum shape_type sha

             switch(shape_type) {
               case SHAPE_IVAR:
-                new_shape->iv_count = new_shape->parent->iv_count + 1;
+                new_shape->iv_count = rb_shape_get_shape_by_id(new_shape->parent_id)->iv_count + 1;

                 // Check if we should update max_iv_count on the object's class
                 if (BUILTIN_TYPE(obj) == T_OBJECT) {

@@ -150,7 +150,7 @@ get_next_shape_internal(rb_shape_t* shape, ID id, VALUE obj, enum shape_type sha
                 break;
               case SHAPE_IVAR_UNDEF:
               case SHAPE_FROZEN:
-                new_shape->iv_count = new_shape->parent->iv_count;
+                new_shape->iv_count = rb_shape_get_shape_by_id(new_shape->parent_id)->iv_count;
                 break;
               case SHAPE_ROOT:
                 rb_bug("Unreachable");

@@ -240,7 +240,7 @@ rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id)

 bool
 rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value) {
-    while (shape->parent) {
+    while (shape->parent_id != INVALID_SHAPE_ID) {
         if (shape->edge_name == id) {
             enum shape_type shape_type;
             shape_type = (enum shape_type)shape->type;

@@ -257,7 +257,7 @@ rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value) {
                 rb_bug("Ivar should not exist on frozen transition\n");
             }
         }
-        shape = shape->parent;
+        shape = rb_shape_get_shape_by_id(shape->parent_id);
     }
     return false;
 }

@@ -278,17 +278,23 @@ shape_alloc(void)
 }

 rb_shape_t *
-rb_shape_alloc(ID edge_name, rb_shape_t * parent)
+rb_shape_alloc_with_parent_id(ID edge_name, shape_id_t parent_id)
 {
     rb_shape_t * shape = shape_alloc();

     shape->edge_name = edge_name;
     shape->iv_count = 0;
-    shape->parent = parent;
+    shape->parent_id = parent_id;

     return shape;
 }

+rb_shape_t *
+rb_shape_alloc(ID edge_name, rb_shape_t * parent)
+{
+    return rb_shape_alloc_with_parent_id(edge_name, rb_shape_id(parent));
+}
+
 MJIT_FUNC_EXPORTED void
 rb_shape_set_shape(VALUE obj, rb_shape_t* shape)
 {

@@ -325,8 +331,8 @@ rb_shape_parent_id(VALUE self)
 {
     rb_shape_t * shape;
     TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
-    if (shape->parent) {
-        return INT2NUM(rb_shape_id(shape->parent));
+    if (shape->parent_id != INVALID_SHAPE_ID) {
+        return INT2NUM(shape->parent_id);
     }
     else {
         return Qnil;

@@ -402,9 +408,9 @@ rb_shape_export_depth(VALUE self)
     TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);

     unsigned int depth = 0;
-    while (shape->parent) {
+    while (shape->parent_id != INVALID_SHAPE_ID) {
         depth++;
-        shape = shape->parent;
+        shape = rb_shape_get_shape_by_id(shape->parent_id);
     }
     return INT2NUM(depth);
 }

@@ -414,8 +420,8 @@ rb_shape_parent(VALUE self)
 {
     rb_shape_t * shape;
     TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
-    if (shape->parent) {
-        return rb_shape_t_to_rb_cShape(shape->parent);
+    if (shape->parent_id != INVALID_SHAPE_ID) {
+        return rb_shape_t_to_rb_cShape(rb_shape_get_shape_by_id(shape->parent_id));
     }
     else {
         return Qnil;

@@ -426,11 +432,11 @@ VALUE rb_shape_debug_shape(VALUE self, VALUE obj) {
     return rb_shape_t_to_rb_cShape(rb_shape_get_shape(obj));
 }

-VALUE rb_shape_debug_root_shape(VALUE self) {
+VALUE rb_shape_root_shape(VALUE self) {
     return rb_shape_t_to_rb_cShape(rb_shape_get_root_shape());
 }

-VALUE rb_shape_debug_frozen_root_shape(VALUE self) {
+VALUE rb_shape_frozen_root_shape(VALUE self) {
     return rb_shape_t_to_rb_cShape(rb_shape_get_frozen_root_shape());
 }

@@ -460,7 +466,7 @@ VALUE rb_obj_shape(rb_shape_t* shape) {
         rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(ROOT_SHAPE_ID));
     }
     else {
-        rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(rb_shape_id(shape->parent)));
+        rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(shape->parent_id));
     }

     rb_hash_aset(rb_shape, ID2SYM(rb_intern("edge_name")), rb_id2str(shape->edge_name));

@@ -471,20 +477,7 @@ static VALUE shape_transition_tree(VALUE self) {
     return rb_obj_shape(rb_shape_get_root_shape());
 }

-static VALUE shape_count(VALUE self) {
-    int shape_count = 0;
-    rb_vm_t *vm = GET_VM();
-    for(shape_id_t i = 0; i < vm->next_shape_id; i++) {
-        if(rb_shape_get_shape_by_id_without_assertion(i)) {
-            shape_count++;
-        }
-    }
-    return INT2NUM(shape_count);
-}
-
-static VALUE
-shape_max_shape_count(VALUE self)
-{
+static VALUE next_shape_id(VALUE self) {
     return INT2NUM(GET_VM()->next_shape_id);
 }

@@ -494,6 +487,16 @@ rb_shape_flags_mask(void)
     return SHAPE_FLAG_MASK;
 }

+static VALUE
+rb_shape_find_by_id(VALUE mod, VALUE id)
+{
+    shape_id_t shape_id = NUM2INT(id);
+    if (shape_id < 0 || shape_id >= GET_VM()->next_shape_id) {
+        rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id);
+    }
+    return rb_shape_t_to_rb_cShape(rb_shape_get_shape_by_id(shape_id));
+}
+
 void
 Init_shape(void)
 {

@@ -513,11 +516,12 @@ Init_shape(void)
     rb_define_const(rb_cShape, "SHAPE_IVAR_UNDEF", INT2NUM(SHAPE_IVAR_UNDEF));
     rb_define_const(rb_cShape, "SHAPE_FROZEN", INT2NUM(SHAPE_FROZEN));
     rb_define_const(rb_cShape, "SHAPE_BITS", INT2NUM(SHAPE_BITS));
+    rb_define_const(rb_cShape, "SHAPE_FLAG_SHIFT", INT2NUM(SHAPE_FLAG_SHIFT));

-    rb_define_module_function(rb_cRubyVM, "debug_shape_transition_tree", shape_transition_tree, 0);
-    rb_define_module_function(rb_cRubyVM, "debug_shape_count", shape_count, 0);
-    rb_define_singleton_method(rb_cRubyVM, "debug_shape", rb_shape_debug_shape, 1);
-    rb_define_singleton_method(rb_cRubyVM, "debug_max_shape_count", shape_max_shape_count, 0);
-    rb_define_singleton_method(rb_cRubyVM, "debug_root_shape", rb_shape_debug_root_shape, 0);
-    rb_define_singleton_method(rb_cRubyVM, "debug_frozen_root_shape", rb_shape_debug_frozen_root_shape, 0);
+    rb_define_singleton_method(rb_cShape, "transition_tree", shape_transition_tree, 0);
+    rb_define_singleton_method(rb_cShape, "find_by_id", rb_shape_find_by_id, 1);
+    rb_define_singleton_method(rb_cShape, "next_shape_id", next_shape_id, 0);
+    rb_define_singleton_method(rb_cShape, "of", rb_shape_debug_shape, 1);
+    rb_define_singleton_method(rb_cShape, "root_shape", rb_shape_root_shape, 0);
+    rb_define_singleton_method(rb_cShape, "frozen_root_shape", rb_shape_frozen_root_shape, 0);
 }
shape.h (18 lines changed)

@@ -43,11 +43,11 @@ typedef uint16_t shape_id_t;
 # define FROZEN_ROOT_SHAPE_ID 0x1

 struct rb_shape {
-    struct rb_shape * parent; // Pointer to the parent
     struct rb_id_table * edges; // id_table from ID (ivar) to next shape
     ID edge_name; // ID (ivar) for transition from parent to rb_shape
     attr_index_t iv_count;
     uint8_t type;
+    shape_id_t parent_id;
 };

 typedef struct rb_shape rb_shape_t;

@@ -59,21 +59,6 @@ enum shape_type {
     SHAPE_IVAR_UNDEF,
 };

-static inline shape_id_t
-IMEMO_CACHED_SHAPE_ID(VALUE cc)
-{
-    RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
-    return (shape_id_t)(SHAPE_MASK & (RBASIC(cc)->flags >> SHAPE_FLAG_SHIFT));
-}
-
-static inline void
-IMEMO_SET_CACHED_SHAPE_ID(VALUE cc, shape_id_t shape_id)
-{
-    RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
-    RBASIC(cc)->flags &= SHAPE_FLAG_MASK;
-    RBASIC(cc)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
-}
-
 #if SHAPE_IN_BASIC_FLAGS
 static inline shape_id_t
 RBASIC_SHAPE_ID(VALUE obj)

@@ -141,6 +126,7 @@ shape_id_t rb_shape_id(rb_shape_t * shape);
 MJIT_SYMBOL_EXPORT_END

 rb_shape_t * rb_shape_alloc(ID edge_name, rb_shape_t * parent);
+rb_shape_t * rb_shape_alloc_with_parent_id(ID edge_name, shape_id_t parent_id);

 bool rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id);
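One consequence of this commit visible throughout shape.c above: struct rb_shape no longer stores a parent pointer, so every walk toward the root follows parent_id through rb_shape_get_shape_by_id and stops at INVALID_SHAPE_ID. Reduced to a sketch (this restates the loop from rb_shape_export_depth in the diff; it is not new code in the commit):

/* Walk from a shape to the root by ID instead of by pointer. */
static unsigned int
shape_depth(rb_shape_t *shape)
{
    unsigned int depth = 0;
    while (shape->parent_id != INVALID_SHAPE_ID) {
        depth++;
        shape = rb_shape_get_shape_by_id(shape->parent_id);
    }
    return depth;
}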
@@ -23,7 +23,7 @@ class TestShapes < Test::Unit::TestCase
     end
   end

-  # RubyVM.debug_shape returns new instances of shape objects for
+  # RubyVM::Shape.of returns new instances of shape objects for
   # each call. This helper method allows us to define equality for
   # shapes
   def assert_shape_equal(shape1, shape2)

@@ -39,63 +39,63 @@ class TestShapes < Test::Unit::TestCase

   def test_iv_index
     example = RemoveAndAdd.new
-    shape = RubyVM.debug_shape(example)
+    shape = RubyVM::Shape.of(example)
     assert_equal 0, shape.iv_count

     example.add_foo # makes a transition
-    new_shape = RubyVM.debug_shape(example)
+    new_shape = RubyVM::Shape.of(example)
     assert_equal([:@foo], example.instance_variables)
     assert_equal(shape.id, new_shape.parent.id)
     assert_equal(1, new_shape.iv_count)

     example.remove # makes a transition
-    remove_shape = RubyVM.debug_shape(example)
+    remove_shape = RubyVM::Shape.of(example)
     assert_equal([], example.instance_variables)
     assert_equal(new_shape.id, remove_shape.parent.id)
     assert_equal(1, remove_shape.iv_count)

     example.add_bar # makes a transition
-    bar_shape = RubyVM.debug_shape(example)
+    bar_shape = RubyVM::Shape.of(example)
     assert_equal([:@bar], example.instance_variables)
     assert_equal(remove_shape.id, bar_shape.parent.id)
     assert_equal(2, bar_shape.iv_count)
   end

   def test_new_obj_has_root_shape
-    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(Object.new))
+    assert_shape_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of(Object.new))
   end

   def test_frozen_new_obj_has_frozen_root_shape
     assert_shape_equal(
-      RubyVM.debug_frozen_root_shape,
-      RubyVM.debug_shape(Object.new.freeze)
+      RubyVM::Shape.frozen_root_shape,
+      RubyVM::Shape.of(Object.new.freeze)
     )
   end

   def test_str_has_root_shape
-    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(""))
+    assert_shape_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of(""))
   end

   def test_array_has_root_shape
-    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape([]))
+    assert_shape_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of([]))
   end

   def test_hash_has_root_shape
-    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape({}))
+    assert_shape_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of({}))
   end

   def test_true_has_frozen_root_shape
-    assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(true))
+    assert_shape_equal(RubyVM::Shape.frozen_root_shape, RubyVM::Shape.of(true))
   end

   def test_nil_has_frozen_root_shape
-    assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(nil))
+    assert_shape_equal(RubyVM::Shape.frozen_root_shape, RubyVM::Shape.of(nil))
   end

   def test_basic_shape_transition
     obj = Example.new
-    refute_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(obj))
-    assert_shape_equal(RubyVM.debug_root_shape.edges[:@a], RubyVM.debug_shape(obj))
+    refute_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of(obj))
+    assert_shape_equal(RubyVM::Shape.root_shape.edges[:@a], RubyVM::Shape.of(obj))
     assert_equal(obj.instance_variable_get(:@a), 1)
   end

@@ -103,13 +103,13 @@ class TestShapes < Test::Unit::TestCase
     obj = Example.new
     obj2 = ""
     obj2.instance_variable_set(:@a, 1)
-    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
+    assert_shape_equal(RubyVM::Shape.of(obj), RubyVM::Shape.of(obj2))
   end

   def test_duplicating_objects
     obj = Example.new
     obj2 = obj.dup
-    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
+    assert_shape_equal(RubyVM::Shape.of(obj), RubyVM::Shape.of(obj2))
   end

   def test_freezing_and_duplicating_object

@@ -118,14 +118,14 @@ class TestShapes < Test::Unit::TestCase
     refute_predicate(obj2, :frozen?)
     # dup'd objects shouldn't be frozen, and the shape should be the
     # parent shape of the copied object
-    assert_equal(RubyVM.debug_shape(obj).parent.id, RubyVM.debug_shape(obj2).id)
+    assert_equal(RubyVM::Shape.of(obj).parent.id, RubyVM::Shape.of(obj2).id)
   end

   def test_freezing_and_duplicating_object_with_ivars
     obj = Example.new.freeze
     obj2 = obj.dup
     refute_predicate(obj2, :frozen?)
-    refute_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
+    refute_shape_equal(RubyVM::Shape.of(obj), RubyVM::Shape.of(obj2))
     assert_equal(obj2.instance_variable_get(:@a), 1)
   end

@@ -135,7 +135,7 @@ class TestShapes < Test::Unit::TestCase
     str.freeze
     str2 = str.dup
     refute_predicate(str2, :frozen?)
-    refute_equal(RubyVM.debug_shape(str).id, RubyVM.debug_shape(str2).id)
+    refute_equal(RubyVM::Shape.of(str).id, RubyVM::Shape.of(str2).id)
     assert_equal(str2.instance_variable_get(:@a), 1)
   end

@@ -143,14 +143,14 @@ class TestShapes < Test::Unit::TestCase
     obj = Object.new.freeze
     obj2 = obj.clone(freeze: true)
     assert_predicate(obj2, :frozen?)
-    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
+    assert_shape_equal(RubyVM::Shape.of(obj), RubyVM::Shape.of(obj2))
   end

   def test_freezing_and_cloning_object_with_ivars
     obj = Example.new.freeze
     obj2 = obj.clone(freeze: true)
     assert_predicate(obj2, :frozen?)
-    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
+    assert_shape_equal(RubyVM::Shape.of(obj), RubyVM::Shape.of(obj2))
     assert_equal(obj2.instance_variable_get(:@a), 1)
   end

@@ -158,7 +158,7 @@ class TestShapes < Test::Unit::TestCase
     str = "str".freeze
     str2 = str.clone(freeze: true)
     assert_predicate(str2, :frozen?)
-    assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
+    assert_shape_equal(RubyVM::Shape.of(str), RubyVM::Shape.of(str2))
   end

   def test_freezing_and_cloning_string_with_ivars

@@ -167,7 +167,16 @@ class TestShapes < Test::Unit::TestCase
     str.freeze
     str2 = str.clone(freeze: true)
     assert_predicate(str2, :frozen?)
-    assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
+    assert_shape_equal(RubyVM::Shape.of(str), RubyVM::Shape.of(str2))
     assert_equal(str2.instance_variable_get(:@a), 1)
   end
+
+  def test_out_of_bounds_shape
+    assert_raise ArgumentError do
+      RubyVM::Shape.find_by_id(RubyVM::Shape.next_shape_id)
+    end
+    assert_raise ArgumentError do
+      RubyVM::Shape.find_by_id(-1)
+    end
+  end
 end
@@ -1627,7 +1627,7 @@ iterate_over_shapes_with_callback(rb_shape_t *shape, VALUE* iv_list, rb_ivar_for
       case SHAPE_ROOT:
         return;
       case SHAPE_IVAR:
-        iterate_over_shapes_with_callback(shape->parent, iv_list, callback, arg);
+        iterate_over_shapes_with_callback(rb_shape_get_shape_by_id(shape->parent_id), iv_list, callback, arg);
         VALUE val = iv_list[shape->iv_count - 1];
         if (val != Qundef) {
             callback(shape->edge_name, val, arg);

@@ -1635,7 +1635,7 @@ iterate_over_shapes_with_callback(rb_shape_t *shape, VALUE* iv_list, rb_ivar_for
         return;
       case SHAPE_IVAR_UNDEF:
       case SHAPE_FROZEN:
-        iterate_over_shapes_with_callback(shape->parent, iv_list, callback, arg);
+        iterate_over_shapes_with_callback(rb_shape_get_shape_by_id(shape->parent_id), iv_list, callback, arg);
         return;
     }
 }

@@ -1694,7 +1694,7 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)

         rb_shape_t * obj_shape = rb_shape_get_shape(obj);
         if (rb_shape_frozen_shape_p(obj_shape)) {
-            rb_shape_set_shape(clone, obj_shape->parent);
+            rb_shape_set_shape_id(clone, obj_shape->parent_id);
         }
         else {
             rb_shape_set_shape(clone, obj_shape);
vm.c (4 lines changed)

@@ -4054,11 +4054,11 @@ Init_vm_objects(void)
     }

     // Root shape
-    vm->root_shape = rb_shape_alloc(0, 0);
+    vm->root_shape = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
     RUBY_ASSERT(rb_shape_id(vm->root_shape) == ROOT_SHAPE_ID);

     // Frozen root shape
-    vm->frozen_root_shape = rb_shape_alloc(rb_make_internal_id(), vm->root_shape);
+    vm->frozen_root_shape = rb_shape_alloc_with_parent_id(rb_make_internal_id(), rb_shape_id(vm->root_shape));
     vm->frozen_root_shape->type = (uint8_t)SHAPE_FROZEN;
     RUBY_ASSERT(rb_shape_id(vm->frozen_root_shape) == FROZEN_ROOT_SHAPE_ID);
@@ -286,8 +286,7 @@ struct rb_callcache {

     union {
         struct {
-            const attr_index_t index;
-            shape_id_t dest_shape_id;
+            uintptr_t value; // Shape ID in upper bits, index in lower bits
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;

@@ -307,9 +306,7 @@ vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc != vm_cc_empty());
-    IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, shape_id);
-    *(attr_index_t *)&cc->aux_.attr.index = 0;
-    *(shape_id_t *)&cc->aux_.attr.dest_shape_id = shape_id;
+    *(uintptr_t *)&cc->aux_.attr.value = (uintptr_t)(shape_id) << SHAPE_FLAG_SHIFT;
 }

 static inline const struct rb_callcache *

@@ -374,29 +371,7 @@ static inline attr_index_t
 vm_cc_attr_index(const struct rb_callcache *cc)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
-    return cc->aux_.attr.index - 1;
-}
-
-static inline bool
-vm_cc_attr_index_p(const struct rb_callcache *cc)
-{
-    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
-    return cc->aux_.attr.index != 0;
-}
-
-static inline shape_id_t
-vm_cc_attr_index_source_shape_id(const struct rb_callcache *cc)
-{
-    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
-
-    return IMEMO_CACHED_SHAPE_ID((VALUE)cc);
-}
-
-static inline shape_id_t
-vm_cc_attr_shape_id(const struct rb_callcache *cc)
-{
-    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
-    return vm_cc_attr_index_source_shape_id(cc);
+    return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
 }

 static inline shape_id_t

@@ -404,37 +379,31 @@ vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

-    return cc->aux_.attr.dest_shape_id;
+    return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
 }

-static inline attr_index_t
-vm_ic_attr_index(const struct iseq_inline_iv_cache_entry *ic)
+static inline void
+vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t * shape_id, attr_index_t * index)
 {
-    return ic->attr_index - 1;
+    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
+    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
+    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
+    return;
 }

-static inline bool
-vm_ic_attr_index_p(const struct iseq_inline_iv_cache_entry *ic)
+static inline void
+vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t * shape_id, attr_index_t * index)
 {
-    return ic->attr_index > 0;
-}
-
-static inline shape_id_t
-vm_ic_attr_shape_id(const struct iseq_inline_iv_cache_entry *ic)
-{
-    return ic->source_shape_id;
-}
-
-static inline shape_id_t
-vm_ic_attr_index_source_shape_id(const struct iseq_inline_iv_cache_entry *ic)
-{
-    return ic->source_shape_id;
+    uintptr_t cache_value = ic->value; // Atomically read 64 bits
+    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
+    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
+    return;
 }

 static inline shape_id_t
 vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
 {
-    return ic->dest_shape_id;
+    return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
 }

 static inline unsigned int

@@ -479,29 +448,23 @@ vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
 }

 static inline void
-vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
+vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc != vm_cc_empty());
-    IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, source_shape_id);
-    *(attr_index_t *)&cc->aux_.attr.index = (index + 1);
-    *(shape_id_t *)&cc->aux_.attr.dest_shape_id = dest_shape_id;
+    *(uintptr_t *)&cc->aux_.attr.value = (index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
 }

 static inline void
-vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
+vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
 {
-    *(shape_id_t *)&ic->source_shape_id = source_shape_id;
-    *(shape_id_t *)&ic->dest_shape_id = dest_shape_id;
-    *(attr_index_t *)&ic->attr_index = index + 1;
+    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (index + 1);
 }

 static inline void
 vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
 {
-    *(shape_id_t *)&ic->source_shape_id = shape_id;
-    *(shape_id_t *)&ic->dest_shape_id = shape_id;
-    *(attr_index_t *)&ic->attr_index = 0;
+    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
 }

 static inline void
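A detail worth spelling out in the accessors above: the low half of value stores index + 1, so a freshly initialized cache (low bits all zero) decodes, after the unconditional subtraction, to (attr_index_t)-1, which vm_insnhelper.c below names ATTR_INDEX_NOT_SET. A small self-contained check, again assuming the hypothetical 64-bit layout with a 32-bit low half from the sketch near the top:

#include <assert.h>
#include <stdint.h>

typedef uint32_t attr_index_t;

int
main(void)
{
    /* Cache initialized for shape 5 with no index yet: low half is zero. */
    uintptr_t v = (uintptr_t)5 << 32;
    assert((attr_index_t)(v & 0xFFFFFFFF) - 1 == (attr_index_t)-1); /* "not set" */

    /* Shape 5 caching ivar index 3: stored as 3 + 1. */
    v = ((uintptr_t)5 << 32) | (3 + 1);
    assert((attr_index_t)(v & 0xFFFFFFFF) - 1 == 3);

    return 0;
}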
@@ -273,9 +273,7 @@ struct iseq_inline_constant_cache {
 };

 struct iseq_inline_iv_cache_entry {
-    shape_id_t source_shape_id;
-    shape_id_t dest_shape_id;
-    attr_index_t attr_index;
+    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
 };

 struct iseq_inline_cvar_cache_entry {
vm_insnhelper.c (164 lines changed)

@@ -52,7 +52,7 @@ ruby_vm_special_exception_copy(VALUE exc)
     VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
     rb_shape_t * shape = rb_shape_get_shape(exc);
     if (rb_shape_frozen_shape_p(shape)) {
-        shape = shape->parent;
+        shape = rb_shape_get_shape_by_id(shape->parent_id);
     }
     rb_shape_set_shape(e, shape);
     rb_obj_copy_ivar(e, exc);

@@ -1097,11 +1097,11 @@ fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, in
 {
     if (is_attr) {
         if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_set(cc, index, shape_id, shape_id);
+            vm_cc_attr_index_set(cc, index, shape_id);
         }
     }
     else {
-        vm_ic_attr_index_set(iseq, ic, index, shape_id, shape_id);
+        vm_ic_attr_index_set(iseq, ic, index, shape_id);
     }
 }

@@ -1110,6 +1110,8 @@ fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, in
 #define ractor_object_incidental_shareable_p(obj, val) \
     ractor_incidental_shareable_p(rb_ractor_shareable_p(obj), val)

+#define ATTR_INDEX_NOT_SET (attr_index_t)-1
+
 ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int));
 static inline VALUE
 vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)

@@ -1155,31 +1157,22 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
         }

         shape_id_t cached_id;
+        attr_index_t index;

         if (is_attr) {
-            cached_id = vm_cc_attr_shape_id(cc);
+            vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
         }
         else {
-            cached_id = vm_ic_attr_shape_id(ic);
+            vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
         }

-        attr_index_t index;
-
-        if (LIKELY(cached_id == shape_id)) {
-            RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
-
-            if (is_attr && vm_cc_attr_index_p(cc)) {
-                index = vm_cc_attr_index(cc);
-            }
-            else if (!is_attr && vm_ic_attr_index_p(ic)) {
-                index = vm_ic_attr_index(ic);
-            }
-            else {
+        if(LIKELY(cached_id == shape_id)) {
+            if (index == ATTR_INDEX_NOT_SET) {
                 return Qnil;
             }

             val = ivar_list[index];
             VM_ASSERT(BUILTIN_TYPE(obj) == T_OBJECT && rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
             RUBY_ASSERT(val != Qundef);
         }
         else { // cache miss case
 #if RUBY_DEBUG

@@ -1199,7 +1192,6 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
         }
 #endif

-        attr_index_t index;
         rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);

         if (rb_shape_get_iv_index(shape, id, &index)) {

@@ -1209,6 +1201,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call

             // We fetched the ivar list above
             val = ivar_list[index];
+            RUBY_ASSERT(val != Qundef);
         }
         else {
             if (is_attr) {

@@ -1242,16 +1235,16 @@ general_path:
 }

 static void
-populate_cache(attr_index_t index, shape_id_t shape_id, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
+populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
 {
     // Cache population code
     if (is_attr) {
         if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_set(cc, index, shape_id, next_shape_id);
+            vm_cc_attr_index_set(cc, index, next_shape_id);
         }
     }
     else {
-        vm_ic_attr_index_set(iseq, ic, index, shape_id, next_shape_id);
+        vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
     }
 }

@@ -1272,12 +1265,12 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,

         uint32_t num_iv = ROBJECT_NUMIV(obj);
         rb_shape_t* shape = rb_shape_get_shape(obj);
-        shape_id_t current_shape_id = ROBJECT_SHAPE_ID(obj);
-        shape_id_t next_shape_id = current_shape_id;
+        shape_id_t next_shape_id = ROBJECT_SHAPE_ID(obj);

         rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);

         if (shape != next_shape) {
+            RUBY_ASSERT(next_shape->parent_id == rb_shape_id(shape));
             rb_shape_set_shape(obj, next_shape);
             next_shape_id = ROBJECT_SHAPE_ID(obj);
         }

@@ -1287,7 +1280,7 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,
                 rb_raise(rb_eArgError, "too many instance variables");
             }

-            populate_cache(index, current_shape_id, next_shape_id, id, iseq, ic, cc, is_attr);
+            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
         }
         else {
             rb_bug("Didn't find instance variable %s\n", rb_id2name(id));

@@ -1295,6 +1288,7 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,

         // Ensure the IV buffer is wide enough to store the IV
         if (UNLIKELY(index >= num_iv)) {
+            RUBY_ASSERT(index == num_iv);
             rb_init_iv_list(obj);
         }

@@ -1309,7 +1303,6 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,
         break;
       default:
         {
-            shape_id_t shape_id = rb_shape_get_shape_id(obj);
             rb_ivar_set(obj, id, val);
             shape_id_t next_shape_id = rb_shape_get_shape_id(obj);
             rb_shape_t *next_shape = rb_shape_get_shape_by_id(next_shape_id);

@@ -1320,7 +1313,7 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,
                 rb_raise(rb_eArgError, "too many instance variables");
             }

-            populate_cache(index, shape_id, next_shape_id, id, iseq, ic, cc, is_attr);
+            populate_cache(index, next_shape_id, id, iseq, ic, cc, is_attr);
         }
         else {
             rb_bug("didn't find the id\n");

@@ -1346,9 +1339,9 @@ vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache
     return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
 }

-NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index));
+NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index));
 static VALUE
-vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
+vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
 {
 #if SHAPE_IN_BASIC_FLAGS
     shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

@@ -1356,73 +1349,87 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shap
     shape_id_t shape_id = rb_generic_shape_id(obj);
 #endif

+    struct gen_ivtbl *ivtbl = 0;
+
     // Cache hit case
-    if (shape_id == source_shape_id) {
+    if (shape_id == dest_shape_id) {
         RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

-        struct gen_ivtbl *ivtbl = 0;
-        if (dest_shape_id != shape_id) {
-            ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
+        // Just get the IV table
+        rb_gen_ivtbl_get(obj, 0, &ivtbl);
+    }
+    else if (dest_shape_id != INVALID_SHAPE_ID) {
+        rb_shape_t * dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
+        shape_id_t source_shape_id = dest_shape->parent_id;
+
+        if (shape_id == source_shape_id && dest_shape->edge_name == id && dest_shape->type == SHAPE_IVAR) {
+            ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
 #if SHAPE_IN_BASIC_FLAGS
             RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
 #else
             ivtbl->shape_id = dest_shape_id;
 #endif
         }
         else {
-            // Just get the IV table
-            rb_gen_ivtbl_get(obj, 0, &ivtbl);
+            return Qundef;
         }
+    }
+    else {
+        return Qundef;
+    }

-        VALUE *ptr = ivtbl->ivptr;
-
-        RB_OBJ_WRITE(obj, &ptr[index], val);
-
-        RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
-
-        return val;
-    }
-    else {
-        return Qundef;
-    }
-
-    return Qundef;
+    VALUE *ptr = ivtbl->ivptr;
+
+    RB_OBJ_WRITE(obj, &ptr[index], val);
+
+    RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
+
+    return val;
 }

 static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
+vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t index)
 {
 #if OPT_IC_FOR_IVAR
     switch (BUILTIN_TYPE(obj)) {
       case T_OBJECT:
         {
             VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
-            // If object's shape id is the same as the source
-            // then do the shape transition and write the ivar
+            // If object's shape id is the same as the dest
+            // then write the ivar

             shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);

-            // Do we have a cache hit *and* is the CC intitialized
-            if (shape_id == source_shape_id) {
+            if (LIKELY(shape_id == dest_shape_id)) {
                 RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                 VM_ASSERT(!rb_ractor_shareable_p(obj));
+            }
+            else if (dest_shape_id != INVALID_SHAPE_ID) {
+                rb_shape_t *dest_shape = rb_shape_get_shape_by_id(dest_shape_id);
+                shape_id_t source_shape_id = dest_shape->parent_id;

-                if (dest_shape_id != shape_id) {
+                if (shape_id == source_shape_id && dest_shape->edge_name == id && dest_shape->type == SHAPE_IVAR) {
+                    RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
                     if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
                         rb_init_iv_list(obj);
                     }

                     ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
+
+                    RUBY_ASSERT(rb_shape_get_next(rb_shape_get_shape_by_id(source_shape_id), obj, id) == dest_shape);
+                    RUBY_ASSERT(index < ROBJECT_NUMIV(obj));
                 }
-
-                RUBY_ASSERT(index < ROBJECT_NUMIV(obj));
-
-                VALUE *ptr = ROBJECT_IVPTR(obj);
-
-                RB_OBJ_WRITE(obj, &ptr[index], val);
-
-                RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
-
-                return val;
+                else {
+                    break;
+                }
+            } else {
+                break;
             }
+
+            VALUE *ptr = ROBJECT_IVPTR(obj);
+
+            RB_OBJ_WRITE(obj, &ptr[index], val);
+
+            RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
+            return val;
         }
         break;
       case T_CLASS:

@@ -1528,17 +1535,18 @@ vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
 static inline void
 vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
 {
-    shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ic);
-    attr_index_t index = vm_ic_attr_index(ic);
-    shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ic);
-    if (UNLIKELY(vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index) == Qundef)) {
+    shape_id_t dest_shape_id;
+    attr_index_t index;
+    vm_ic_atomic_shape_and_index(ic, &dest_shape_id, &index);
+
+    if (UNLIKELY(vm_setivar(obj, id, val, dest_shape_id, index) == Qundef)) {
         switch (BUILTIN_TYPE(obj)) {
           case T_OBJECT:
           case T_CLASS:
           case T_MODULE:
             break;
           default:
-            if (vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index) != Qundef) {
+            if (vm_setivar_default(obj, id, val, dest_shape_id, index) != Qundef) {
                 return;
             }
         }

@@ -3254,12 +3262,11 @@ vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, cons
     RB_DEBUG_COUNTER_INC(ccf_attrset);
     VALUE val = *(cfp->sp - 1);
     cfp->sp -= 2;
-    shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
     attr_index_t index = vm_cc_attr_index(cc);
     shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
     ID id = vm_cc_cme(cc)->def->body.attr.id;
     rb_check_frozen_internal(obj);
-    VALUE res = vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index);
+    VALUE res = vm_setivar(obj, id, val, dest_shape_id, index);
     if (res == Qundef) {
         switch (BUILTIN_TYPE(obj)) {
           case T_OBJECT:

@@ -3268,7 +3275,7 @@ vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, cons
             break;
           default:
             {
-                res = vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index);
+                res = vm_setivar_default(obj, id, val, dest_shape_id, index);
                 if (res != Qundef) {
                     return res;
                 }

@@ -3894,8 +3901,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         .call_ = cc->call_,
         .aux_ = {
             .attr = {
-                .index = 0,
-                .dest_shape_id = INVALID_SHAPE_ID,
+                .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
             }
         },
     });
@@ -1943,7 +1943,7 @@ fn gen_set_ivar(
         rb_vm_set_ivar_id as *const u8,
         vec![
             recv_opnd,
-            ivar_index.into(),
+            Opnd::UImm(ivar_name.into()),
             val_opnd,
         ],
     );
@@ -406,11 +406,11 @@ pub type attr_index_t = u32;
 pub type shape_id_t = u32;
 #[repr(C)]
 pub struct rb_shape {
-    pub parent: *mut rb_shape,
     pub edges: *mut rb_id_table,
     pub edge_name: ID,
     pub iv_count: attr_index_t,
     pub type_: u8,
+    pub parent_id: shape_id_t,
 }
 pub type rb_shape_t = rb_shape;
 extern "C" {

@@ -775,10 +775,9 @@ pub struct iseq_inline_constant_cache {
     pub segments: *const ID,
 }
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct iseq_inline_iv_cache_entry {
-    pub source_shape_id: shape_id_t,
-    pub dest_shape_id: shape_id_t,
-    pub attr_index: attr_index_t,
+    pub value: usize,
 }
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]