Mirror of https://github.com/github/ruby.git

Revert "Revert "This commit implements the Object Shapes technique in CRuby.""

This reverts commit 9a6803c90b.

Parent: 5ffbb2be18
Commit: ad63b668e2
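Background for the patch below: Object Shapes replaces the class-serial-based inline caches for instance variable access. Each object carries a shape id, and a shape records which ivars have been set (and in what order) as edges in a VM-wide transition tree, so two objects that gained the same ivars in the same order share a shape. An inline cache then only has to compare one shape id. The following is a hedged sketch of the resulting read fast path, not code from this commit; slow_path_getivar is a hypothetical stand-in for the cache-miss handler, while the other names appear in the diff below.

    /* Sketch: a shape-keyed instance variable read. On a hit, the cached
     * attr_index is valid for every object with the same shape id, so no
     * class-serial check or hash lookup is needed. attr_index follows the
     * "0 means unset" convention visible in the MJIT compiler changes. */
    static VALUE
    sketch_getivar(VALUE obj, IVC ic)
    {
        if (ROBJECT_SHAPE_ID(obj) == ic->source_shape_id && ic->attr_index > 0) {
            return ROBJECT_IVPTR(obj)[ic->attr_index - 1];  /* fast path */
        }
        return slow_path_getivar(obj, ic);  /* hypothetical: walk the shape tree, refill the cache */
    }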
@@ -34,3 +34,19 @@ assert_equal %{ok}, %{
     print "ok"
   end
 }, '[ruby-core:15120]'
+
+assert_equal %{ok}, %{
+  class Big
+    attr_reader :foo
+    def initialize
+      @foo = "ok"
+    end
+  end
+
+  obj = Big.new
+  100.times do |i|
+    obj.instance_variable_set(:"@ivar_\#{i}", i)
+  end
+
+  Big.new.foo
+}

common.mk (322 changed lines)
The diff for this file is not shown because it is too large.

compile.c (26 changed lines)

@@ -2058,20 +2058,7 @@ cdhash_set_label_i(VALUE key, VALUE val, VALUE ptr)
 static inline VALUE
 get_ivar_ic_value(rb_iseq_t *iseq,ID id)
 {
-    VALUE val;
-    struct rb_id_table *tbl = ISEQ_COMPILE_DATA(iseq)->ivar_cache_table;
-    if (tbl) {
-        if (rb_id_table_lookup(tbl,id,&val)) {
-            return val;
-        }
-    }
-    else {
-        tbl = rb_id_table_create(1);
-        ISEQ_COMPILE_DATA(iseq)->ivar_cache_table = tbl;
-    }
-    val = INT2FIX(ISEQ_BODY(iseq)->ivc_size++);
-    rb_id_table_insert(tbl,id,val);
-    return val;
+    return INT2FIX(ISEQ_BODY(iseq)->ivc_size++);
 }
 
 static inline VALUE

@@ -2472,9 +2459,13 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                     generated_iseq[code_index + 1 + j] = (VALUE)ic;
                 }
                 break;
+              case TS_IVC: /* inline ivar cache */
+                {
+                    unsigned int ic_index = FIX2UINT(operands[j]);
+                    vm_ic_attr_index_initialize(((IVC)&body->is_entries[ic_index]), INVALID_SHAPE_ID);
+                }
               case TS_ISE: /* inline storage entry: `once` insn */
              case TS_ICVARC: /* inline cvar cache */
-              case TS_IVC: /* inline ivar cache */
                 {
                    unsigned int ic_index = FIX2UINT(operands[j]);
                    IC ic = &ISEQ_IS_ENTRY_START(body, type)[ic_index].ic_cache;

@@ -11514,6 +11505,11 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
 
                 ISE ic = ISEQ_IS_ENTRY_START(load_body, operand_type) + op;
                 code[code_index] = (VALUE)ic;
+
+                if (operand_type == TS_IVC) {
+                    vm_ic_attr_index_initialize(((IVC)code[code_index]), INVALID_SHAPE_ID);
+                }
+
             }
             break;
           case TS_CALLDATA:
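Note on the compile.c change: with shapes, get_ivar_ic_value no longer deduplicates cache slots by ivar name, because an inline cache now records per-site state (the shape id last seen at that site), not a property of the ID. Each fresh IVC slot is initialized with INVALID_SHAPE_ID, a value no live object can carry, so the first execution of a new site is a guaranteed miss that then fills the cache. A minimal sketch of that invariant (the sketch_ name is hypothetical; the call is the one used in the hunks above):

    /* Sketch: INVALID_SHAPE_ID as the "empty cache" marker. */
    static void
    sketch_ivc_init(IVC ic)
    {
        vm_ic_attr_index_initialize(ic, INVALID_SHAPE_ID); /* cannot match any object */
    }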

debug_counter.h

@@ -130,7 +130,6 @@ RB_DEBUG_COUNTER(frame_C2R)
 /* instance variable counts
  *
  * * ivar_get_ic_hit/miss: ivar_get inline cache (ic) hit/miss counts (VM insn)
- * * ivar_get_ic_miss_serial: ivar_get ic miss reason by serial (VM insn)
  * * ivar_get_ic_miss_unset: ... by unset (VM insn)
  * * ivar_get_ic_miss_noobject: ... by "not T_OBJECT" (VM insn)
  * * ivar_set_...: same counts with ivar_set (VM insn)

@@ -140,17 +139,17 @@ RB_DEBUG_COUNTER(frame_C2R)
  */
 RB_DEBUG_COUNTER(ivar_get_ic_hit)
 RB_DEBUG_COUNTER(ivar_get_ic_miss)
-RB_DEBUG_COUNTER(ivar_get_ic_miss_serial)
-RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
 RB_DEBUG_COUNTER(ivar_get_ic_miss_noobject)
 RB_DEBUG_COUNTER(ivar_set_ic_hit)
 RB_DEBUG_COUNTER(ivar_set_ic_miss)
-RB_DEBUG_COUNTER(ivar_set_ic_miss_serial)
-RB_DEBUG_COUNTER(ivar_set_ic_miss_unset)
 RB_DEBUG_COUNTER(ivar_set_ic_miss_iv_hit)
 RB_DEBUG_COUNTER(ivar_set_ic_miss_noobject)
 RB_DEBUG_COUNTER(ivar_get_base)
 RB_DEBUG_COUNTER(ivar_set_base)
+RB_DEBUG_COUNTER(ivar_get_ic_miss_set)
+RB_DEBUG_COUNTER(ivar_get_cc_miss_set)
+RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
+RB_DEBUG_COUNTER(ivar_get_cc_miss_unset)
 
 /* local variable counts
  *

ext/coverage/depend

@@ -165,7 +165,9 @@ coverage.o: $(top_srcdir)/ccan/check_type/check_type.h
 coverage.o: $(top_srcdir)/ccan/container_of/container_of.h
 coverage.o: $(top_srcdir)/ccan/list/list.h
 coverage.o: $(top_srcdir)/ccan/str/str.h
+coverage.o: $(top_srcdir)/constant.h
 coverage.o: $(top_srcdir)/gc.h
+coverage.o: $(top_srcdir)/id_table.h
 coverage.o: $(top_srcdir)/internal.h
 coverage.o: $(top_srcdir)/internal/array.h
 coverage.o: $(top_srcdir)/internal/compilers.h

@@ -176,12 +178,14 @@ coverage.o: $(top_srcdir)/internal/sanitizers.h
 coverage.o: $(top_srcdir)/internal/serial.h
 coverage.o: $(top_srcdir)/internal/static_assert.h
 coverage.o: $(top_srcdir)/internal/thread.h
+coverage.o: $(top_srcdir)/internal/variable.h
 coverage.o: $(top_srcdir)/internal/vm.h
 coverage.o: $(top_srcdir)/internal/warnings.h
 coverage.o: $(top_srcdir)/method.h
 coverage.o: $(top_srcdir)/node.h
 coverage.o: $(top_srcdir)/ruby_assert.h
 coverage.o: $(top_srcdir)/ruby_atomic.h
+coverage.o: $(top_srcdir)/shape.h
 coverage.o: $(top_srcdir)/thread_pthread.h
 coverage.o: $(top_srcdir)/vm_core.h
 coverage.o: $(top_srcdir)/vm_opts.h

ext/objspace/depend

@@ -350,6 +350,7 @@ objspace.o: $(top_srcdir)/internal/serial.h
 objspace.o: $(top_srcdir)/internal/static_assert.h
 objspace.o: $(top_srcdir)/internal/warnings.h
 objspace.o: $(top_srcdir)/node.h
+objspace.o: $(top_srcdir)/shape.h
 objspace.o: $(top_srcdir)/symbol.h
 objspace.o: objspace.c
 objspace.o: {$(VPATH)}id.h

@@ -533,7 +534,9 @@ objspace_dump.o: $(top_srcdir)/ccan/check_type/check_type.h
 objspace_dump.o: $(top_srcdir)/ccan/container_of/container_of.h
 objspace_dump.o: $(top_srcdir)/ccan/list/list.h
 objspace_dump.o: $(top_srcdir)/ccan/str/str.h
+objspace_dump.o: $(top_srcdir)/constant.h
 objspace_dump.o: $(top_srcdir)/gc.h
+objspace_dump.o: $(top_srcdir)/id_table.h
 objspace_dump.o: $(top_srcdir)/internal.h
 objspace_dump.o: $(top_srcdir)/internal/array.h
 objspace_dump.o: $(top_srcdir)/internal/compilers.h

@@ -544,12 +547,14 @@ objspace_dump.o: $(top_srcdir)/internal/sanitizers.h
 objspace_dump.o: $(top_srcdir)/internal/serial.h
 objspace_dump.o: $(top_srcdir)/internal/static_assert.h
 objspace_dump.o: $(top_srcdir)/internal/string.h
+objspace_dump.o: $(top_srcdir)/internal/variable.h
 objspace_dump.o: $(top_srcdir)/internal/vm.h
 objspace_dump.o: $(top_srcdir)/internal/warnings.h
 objspace_dump.o: $(top_srcdir)/method.h
 objspace_dump.o: $(top_srcdir)/node.h
 objspace_dump.o: $(top_srcdir)/ruby_assert.h
 objspace_dump.o: $(top_srcdir)/ruby_atomic.h
+objspace_dump.o: $(top_srcdir)/shape.h
 objspace_dump.o: $(top_srcdir)/thread_pthread.h
 objspace_dump.o: $(top_srcdir)/vm_core.h
 objspace_dump.o: $(top_srcdir)/vm_opts.h

gc.c (50 changed lines)

@@ -2895,8 +2895,7 @@ rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
     GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
     GC_ASSERT(flags & ROBJECT_EMBED);
 
-    st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
-    uint32_t index_tbl_num_entries = index_tbl == NULL ? 0 : (uint32_t)index_tbl->num_entries;
+    uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
 
     size_t size;
     bool embed = true;

@@ -2931,7 +2930,7 @@ rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
 #endif
     }
     else {
-        rb_init_iv_list(obj);
+        rb_ensure_iv_list_size(obj, 0, index_tbl_num_entries);
     }
 
     return obj;

@@ -3206,20 +3205,6 @@ rb_free_const_table(struct rb_id_table *tbl)
     rb_id_table_free(tbl);
 }
 
-static int
-free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
-{
-    xfree((void *)value);
-    return ST_CONTINUE;
-}
-
-static void
-iv_index_tbl_free(struct st_table *tbl)
-{
-    st_foreach(tbl, free_iv_index_tbl_free_i, 0);
-    st_free_table(tbl);
-}
-
 // alive: if false, target pointers can be freed already.
 // To check it, we need objspace parameter.
 static void

@@ -3435,6 +3420,16 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
             RB_DEBUG_COUNTER_INC(obj_obj_transient);
         }
         else {
+            rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
+            if (shape) {
+                VALUE klass = RBASIC_CLASS(obj);
+
+                // Increment max_iv_count if applicable, used to determine size pool allocation
+                uint32_t num_of_ivs = shape->iv_count;
+                if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
+                    RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
+                }
+            }
             xfree(RANY(obj)->as.object.as.heap.ivptr);
             RB_DEBUG_COUNTER_INC(obj_obj_ptr);
         }

@@ -3449,9 +3444,6 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
         if (RCLASS_CONST_TBL(obj)) {
             rb_free_const_table(RCLASS_CONST_TBL(obj));
         }
-        if (RCLASS_IV_INDEX_TBL(obj)) {
-            iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
-        }
         if (RCLASS_CVC_TBL(obj)) {
             rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
             rb_id_table_free(RCLASS_CVC_TBL(obj));

@@ -4873,10 +4865,6 @@ obj_memsize_of(VALUE obj, int use_all_types)
         if (RCLASS_CVC_TBL(obj)) {
            size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
         }
-        if (RCLASS_IV_INDEX_TBL(obj)) {
-            // TODO: more correct value
-            size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
-        }
         if (RCLASS_EXT(obj)->iv_tbl) {
             size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
         }

@@ -10408,15 +10396,6 @@ update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
     }
 }
 
-static int
-update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
-{
-    rb_objspace_t *objspace = (rb_objspace_t *)arg;
-    struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
-    UPDATE_IF_MOVED(objspace, ent->class_value);
-    return ST_CONTINUE;
-}
-
 static void
 update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
 {

@@ -10424,11 +10403,6 @@ update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
     UPDATE_IF_MOVED(objspace, ext->includer);
     UPDATE_IF_MOVED(objspace, ext->refined_class);
     update_subclass_entries(objspace, ext->subclasses);
-
-    // ext->iv_index_tbl
-    if (ext->iv_index_tbl) {
-        st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
-    }
 }
 
 static void
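The gc.c changes above wire up a feedback loop: when an object dies, obj_free records its shape's iv_count into RCLASS_EXT(klass)->max_iv_count, and rb_class_instance_allocate_internal sizes new instances from that counter instead of from the removed per-class iv index table. A hedged sketch of the effect (klass is assumed to be a class whose instances have historically held five ivars):

    /* Sketch: after enough instances with 5 ivars have been freed (or
     * shapes with 5 ivars created), max_iv_count == 5, so a new instance
     * reserves 5 ivar slots up front instead of growing its buffer on
     * the first five ivar writes. */
    uint32_t reserve = RCLASS_EXT(klass)->max_iv_count;   /* == 5 here */
    rb_ensure_iv_list_size(obj, 0, reserve);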

include/ruby/internal/core/robject.h

@@ -46,7 +46,6 @@
 #define ROBJECT_EMBED ROBJECT_EMBED
 #define ROBJECT_NUMIV ROBJECT_NUMIV
 #define ROBJECT_IVPTR ROBJECT_IVPTR
-#define ROBJECT_IV_INDEX_TBL ROBJECT_IV_INDEX_TBL
 /** @endcond */
 
 /**

@@ -132,7 +131,7 @@ struct RObject {
          *
          * This is a shortcut for `RCLASS_IV_INDEX_TBL(rb_obj_class(obj))`.
          */
-        struct st_table *iv_index_tbl;
+        struct rb_id_table *iv_index_tbl;
     } heap;
 
 #if USE_RVARGC

include/ruby/internal/fl_type.h

@@ -941,21 +941,8 @@ RB_OBJ_FREEZE_RAW(VALUE obj)
     RB_FL_SET_RAW(obj, RUBY_FL_FREEZE);
 }
 
-/**
- * Prevents further modifications to the given object.  ::rb_eFrozenError shall
- * be raised if modification is attempted.
- *
- * @param[out]  x  Object in question.
- */
-static inline void
-rb_obj_freeze_inline(VALUE x)
-{
-    if (RB_FL_ABLE(x)) {
-        RB_OBJ_FREEZE_RAW(x);
-        if (RBASIC_CLASS(x) && !(RBASIC(x)->flags & RUBY_FL_SINGLETON)) {
-            rb_freeze_singleton_class(x);
-        }
-    }
-}
+RUBY_SYMBOL_EXPORT_BEGIN
+void rb_obj_freeze_inline(VALUE obj);
+RUBY_SYMBOL_EXPORT_END
 
 #endif /* RBIMPL_FL_TYPE_H */

inits.c (1 changed line)

@@ -77,6 +77,7 @@ rb_call_inits(void)
     CALL(vm_stack_canary);
     CALL(ast);
     CALL(gc_stress);
+    CALL(shape);
 
     // enable builtin loading
     CALL(builtin);

@@ -48,9 +48,6 @@
 #undef RHASH_TBL
 #undef RHASH_EMPTY_P
 
-/* internal/object.h */
-#undef ROBJECT_IV_INDEX_TBL
-
 /* internal/struct.h */
 #undef RSTRUCT_LEN
 #undef RSTRUCT_PTR

internal/class.h

@@ -14,6 +14,7 @@
 #include "ruby/internal/stdbool.h"     /* for bool */
 #include "ruby/intern.h"               /* for rb_alloc_func_t */
 #include "ruby/ruby.h"                 /* for struct RBasic */
+#include "shape.h"
 
 #ifdef RCLASS_SUPER
 # undef RCLASS_SUPER

@@ -27,8 +28,8 @@ struct rb_subclass_entry {
 
 struct rb_iv_index_tbl_entry {
     uint32_t index;
-    rb_serial_t class_serial;
-    VALUE class_value;
+    shape_id_t source_shape_id;
+    shape_id_t dest_shape_id;
 };
 
 struct rb_cvar_class_tbl_entry {

@@ -38,7 +39,6 @@ struct rb_cvar_class_tbl_entry {
 };
 
 struct rb_classext_struct {
-    struct st_table *iv_index_tbl; // ID -> struct rb_iv_index_tbl_entry
     struct st_table *iv_tbl;
 #if SIZEOF_SERIAL_T == SIZEOF_VALUE /* otherwise m_tbl is in struct RClass */
     struct rb_id_table *m_tbl;

@@ -64,6 +64,10 @@ struct rb_classext_struct {
     const VALUE refined_class;
     rb_alloc_func_t allocator;
     const VALUE includer;
+    uint32_t max_iv_count;
+#if !SHAPE_IN_BASIC_FLAGS
+    shape_id_t shape_id;
+#endif
 };
 
 struct RClass {

@@ -102,7 +106,6 @@ typedef struct rb_classext_struct rb_classext_t;
 #define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
 #define RCLASS_CC_TBL(c) (RCLASS_EXT(c)->cc_tbl)
 #define RCLASS_CVC_TBL(c) (RCLASS_EXT(c)->cvc_tbl)
-#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
 #define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
 #define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
 #if SIZEOF_SERIAL_T == SIZEOF_VALUE

internal/object.h

@@ -9,11 +9,6 @@
  * @brief      Internal header for Object.
  */
 #include "ruby/ruby.h"          /* for VALUE */
-#include "internal/class.h"     /* for RCLASS_IV_INDEX_TBL */
-
-#ifdef ROBJECT_IV_INDEX_TBL
-# undef ROBJECT_IV_INDEX_TBL
-#endif
 
 /* object.c */
 VALUE rb_class_search_ancestor(VALUE klass, VALUE super);

@@ -26,7 +21,6 @@ int rb_bool_expected(VALUE, const char *, int raise);
 static inline void RBASIC_CLEAR_CLASS(VALUE obj);
 static inline void RBASIC_SET_CLASS_RAW(VALUE obj, VALUE klass);
 static inline void RBASIC_SET_CLASS(VALUE obj, VALUE klass);
-static inline struct st_table *ROBJECT_IV_INDEX_TBL_inline(VALUE obj);
 
 RUBY_SYMBOL_EXPORT_BEGIN
 /* object.c (export) */

@@ -64,20 +58,4 @@ RBASIC_SET_CLASS(VALUE obj, VALUE klass)
     RBASIC_SET_CLASS_RAW(obj, klass);
     RB_OBJ_WRITTEN(obj, oldv, klass);
 }
-
-RBIMPL_ATTR_PURE()
-static inline struct st_table *
-ROBJECT_IV_INDEX_TBL_inline(VALUE obj)
-{
-    if (RB_FL_ANY_RAW(obj, ROBJECT_EMBED)) {
-        VALUE klass = rb_obj_class(obj);
-        return RCLASS_IV_INDEX_TBL(klass);
-    }
-    else {
-        const struct RObject *const ptr = ROBJECT(obj);
-        return ptr->as.heap.iv_index_tbl;
-    }
-}
-#define ROBJECT_IV_INDEX_TBL ROBJECT_IV_INDEX_TBL_inline
-
 #endif /* INTERNAL_OBJECT_H */

internal/variable.h

@@ -37,6 +37,9 @@ static inline void ROBJ_TRANSIENT_SET(VALUE obj);
 static inline void ROBJ_TRANSIENT_UNSET(VALUE obj);
 uint32_t rb_obj_ensure_iv_index_mapping(VALUE obj, ID id);
 
+struct gen_ivtbl;
+int rb_gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl);
+
 RUBY_SYMBOL_EXPORT_BEGIN
 /* variable.c (export) */
 void rb_mark_generic_ivar(VALUE);

@@ -52,6 +55,8 @@ VALUE rb_gvar_set(ID, VALUE);
 VALUE rb_gvar_defined(ID);
 void rb_const_warn_if_deprecated(const rb_const_entry_t *, VALUE, ID);
 void rb_init_iv_list(VALUE obj);
+void rb_ensure_iv_list_size(VALUE obj, uint32_t len, uint32_t newsize);
+struct gen_ivtbl * rb_ensure_generic_iv_list_size(VALUE obj, uint32_t newsize);
 MJIT_SYMBOL_EXPORT_END
 
 static inline bool

iseq.c (14 changed lines)

@@ -230,18 +230,8 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
     union iseq_inline_storage_entry *is_entries = body->is_entries;
 
     if (body->is_entries) {
-        // IVC entries
-        for (unsigned int i = 0; i < body->ivc_size; i++, is_entries++) {
-            IVC ivc = (IVC)is_entries;
-            if (ivc->entry) {
-                RUBY_ASSERT(!RB_TYPE_P(ivc->entry->class_value, T_NONE));
-
-                VALUE nv = func(data, ivc->entry->class_value);
-                if (ivc->entry->class_value != nv) {
-                    ivc->entry->class_value = nv;
-                }
-            }
-        }
+        // Skip iterating over ivc caches
+        is_entries += body->ivc_size;
 
         // ICVARC entries
         for (unsigned int i = 0; i < body->icvarc_size; i++, is_entries++) {

@@ -73,23 +73,6 @@ module RubyVM::MJIT
       src << "#undef GET_SELF\n"
       src << "#define GET_SELF() cfp_self\n"
-
-      # Generate merged ivar guards first if needed
-      if !status.compile_info.disable_ivar_cache && status.merge_ivar_guards_p
-        src << "    if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT) && (rb_serial_t)#{status.ivar_serial} == RCLASS_SERIAL(RBASIC(GET_SELF())->klass) &&"
-        if USE_RVARGC
-          src << "#{status.max_ivar_index} < ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj)
-        else
-          if status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
-            src << "#{status.max_ivar_index} < ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj) && !RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
-          else
-            src << "ROBJECT_EMBED_LEN_MAX == ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj) && RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
-          end
-        end
-        src << "))) {\n"
-        src << "      goto ivar_cancel;\n"
-        src << "    }\n"
-      end
 
       # Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
       # are not considered since vm_exec doesn't call jit_exec for catch tables.
       if iseq.body.param.flags.has_opt

@@ -103,6 +86,13 @@ module RubyVM::MJIT
         src << "    }\n"
       end
 
+      # Generate merged ivar guards first if needed
+      if !status.compile_info.disable_ivar_cache && status.merge_ivar_guards_p
+        src << "    if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT)))) {"
+        src << "      goto ivar_cancel;\n"
+        src << "    }\n"
+      end
+
       C.fprintf(f, src)
       compile_insns(0, 0, status, iseq.body, f)
       compile_cancel_handler(f, iseq.body, status)

@@ -363,52 +353,37 @@ module RubyVM::MJIT
       ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(operands[1]) - body.is_entries)).iv_cache
 
       src = +''
-      if !status.compile_info.disable_ivar_cache && ic_copy.entry
+      if !status.compile_info.disable_ivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
        # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
        # compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)
 
        # JIT: prepare vm_getivar/vm_setivar arguments and variables
        src << "{\n"
        src << "    VALUE obj = GET_SELF();\n"
-        src << "    const uint32_t index = #{ic_copy.entry.index};\n"
-        if status.merge_ivar_guards_p
-          # JIT: Access ivar without checking these VM_ASSERTed prerequisites as we checked them in the beginning of `mjit_compile_body`
-          src << "    VM_ASSERT(RB_TYPE_P(obj, T_OBJECT));\n"
-          src << "    VM_ASSERT((rb_serial_t)#{ic_copy.entry.class_serial} == RCLASS_SERIAL(RBASIC(obj)->klass));\n"
-          src << "    VM_ASSERT(index < ROBJECT_NUMIV(obj));\n"
-          if insn_name == :setinstancevariable
-            if USE_RVARGC
-              src << "    if (LIKELY(!RB_OBJ_FROZEN_RAW(obj) && index < ROBJECT_NUMIV(obj))) {\n"
-              src << "        RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[index], stack[#{stack_size - 1}]);\n"
-            else
-              heap_ivar_p = status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
-              src << "    if (LIKELY(!RB_OBJ_FROZEN_RAW(obj) && #{heap_ivar_p ? 'true' : 'RB_FL_ANY_RAW(obj, ROBJECT_EMBED)'})) {\n"
-              src << "        RB_OBJ_WRITE(obj, &ROBJECT(obj)->as.#{heap_ivar_p ? 'heap.ivptr[index]' : 'ary[index]'}, stack[#{stack_size - 1}]);\n"
-            end
-            src << "    }\n"
-          else
-            src << "    VALUE val;\n"
-            if USE_RVARGC
-              src << "    if (LIKELY(index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n"
-            else
-              heap_ivar_p = status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
-              src << "    if (LIKELY(#{heap_ivar_p ? 'true' : 'RB_FL_ANY_RAW(obj, ROBJECT_EMBED)'} && (val = ROBJECT(obj)->as.#{heap_ivar_p ? 'heap.ivptr[index]' : 'ary[index]'}) != Qundef)) {\n"
-            end
-            src << "        stack[#{stack_size}] = val;\n"
-            src << "    }\n"
-          end
-        else
-          src << "    const rb_serial_t ic_serial = (rb_serial_t)#{ic_copy.entry.class_serial};\n"
+        src << "    const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
        # JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
        if insn_name == :setinstancevariable
-          src << "    if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && !RB_OBJ_FROZEN_RAW(obj))) {\n"
+          src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
+          src << "    const shape_id_t dest_shape_id = (rb_serial_t)#{ic_copy.dest_shape_id};\n"
+          src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj) && \n"
+          src << "        dest_shape_id != ROBJECT_SHAPE_ID(obj)) {\n"
+          src << "        if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {\n"
+          src << "            rb_init_iv_list(obj);\n"
+          src << "        }\n"
+          src << "        ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);\n"
+          src << "        VALUE *ptr = ROBJECT_IVPTR(obj);\n"
+          src << "        RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
+          src << "    }\n"
        else
          src << "    VALUE val;\n"
-          src << "    if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n"
-          src << "        stack[#{stack_size}] = val;\n"
+          if ic_copy.attr_index == 0 # cache hit, but uninitialized iv
+            src << "    /* Uninitialized instance variable */\n"
+            src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
+            src << "        stack[#{stack_size}] = Qnil;\n"
+            src << "    }\n"
+          else
+            src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
+            src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
+            src << "        stack[#{stack_size}] = ROBJECT_IVPTR(obj)[index];\n"
+            src << "    }\n"
+          end
        end

@@ -419,20 +394,19 @@ module RubyVM::MJIT
          src << "    }\n"
          src << "}\n"
          return src
-        elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.entry
+        elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
          # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
          # compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)
 
          # JIT: prepare vm_getivar's arguments and variables
          src << "{\n"
          src << "    VALUE obj = GET_SELF();\n"
-          src << "    const rb_serial_t ic_serial = (rb_serial_t)#{ic_copy.entry.class_serial};\n"
-          src << "    const uint32_t index = #{ic_copy.entry.index};\n"
+          src << "    const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
+          src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
          # JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
          src << "    struct gen_ivtbl *ivtbl;\n"
-          src << "    VALUE val;\n"
-          src << "    if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n"
-          src << "        stack[#{stack_size}] = val;\n"
+          src << "    if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && source_shape_id == rb_shape_get_shape_id(obj) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl))) {\n"
+          src << "        stack[#{stack_size}] = ivtbl->ivptr[index];\n"
          src << "    }\n"
          src << "    else {\n"
          src << "        reg_cfp->pc = original_body_iseq + #{pos};\n"

@@ -832,35 +806,16 @@ module RubyVM::MJIT
    def init_ivar_compile_status(body, status)
      C.mjit_capture_is_entries(body, status.is_entries)
 
-      num_ivars = 0
      pos = 0
-      status.max_ivar_index = 0
-      status.ivar_serial = 0
-
      while pos < body.iseq_size
        insn = INSNS.fetch(C.rb_vm_insn_decode(body.iseq_encoded[pos]))
        if insn.name == :getinstancevariable || insn.name == :setinstancevariable
-          ic = body.iseq_encoded[pos+2]
-          ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(ic) - body.is_entries)).iv_cache
-          if ic_copy.entry # Only initialized (ic_serial > 0) IVCs are optimized
-            num_ivars += 1
-
-            if status.max_ivar_index < ic_copy.entry.index
-              status.max_ivar_index = ic_copy.entry.index
-            end
-
-            if status.ivar_serial == 0
-              status.ivar_serial = ic_copy.entry.class_serial
-            elsif status.ivar_serial != ic_copy.entry.class_serial
-              # Multiple classes have used this ISeq. Give up assuming one serial.
-              status.merge_ivar_guards_p = false
+          status.merge_ivar_guards_p = true
          return
        end
-          end
-        end
        pos += insn.len
      end
-      status.merge_ivar_guards_p = status.ivar_serial > 0 && num_ivars >= 2
    end
 
    # Expand simple macro that doesn't require dynamic C code.
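Taken together, the MJIT changes swap the guard the generated C code executes: instead of comparing a class serial plus an ivar index bound, the emitted fast path compares a single shape id. Roughly, the src fragments above expand to something like the following reconstruction (not literal compiler output; the concrete ids are placeholders):

    /* getinstancevariable, cache hit on a known shape with a set ivar */
    VALUE obj = GET_SELF();
    const shape_id_t source_shape_id = 53;  /* example id captured at JIT compile time */
    const uint32_t index = 0;               /* attr_index - 1 */
    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {
        stack[0] = ROBJECT_IVPTR(obj)[index];
    }
    else {
        goto ivar_cancel;                   /* recompile without the ivar optimization */
    }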

marshal.c (10 changed lines)

@@ -39,6 +39,7 @@
 #include "ruby/st.h"
 #include "ruby/util.h"
 #include "builtin.h"
+#include "shape.h"
 
 #define BITSPERSHORT (2*CHAR_BIT)
 #define SHORTMASK ((1<<BITSPERSHORT)-1)

@@ -622,10 +623,6 @@ w_obj_each(st_data_t key, st_data_t val, st_data_t a)
     }
     return ST_CONTINUE;
 }
-    if (!ivarg->num_ivar) {
-        rb_raise(rb_eRuntimeError, "instance variable added to %"PRIsVALUE" instance",
-                 CLASS_OF(arg->obj));
-    }
     --ivarg->num_ivar;
     w_symbol(ID2SYM(id), arg->arg);
     w_object(value, arg->arg, arg->limit);

@@ -720,6 +717,7 @@ has_ivars(VALUE obj, VALUE encname, VALUE *ivobj)
 static void
 w_ivar_each(VALUE obj, st_index_t num, struct dump_call_arg *arg)
 {
+    shape_id_t shape_id = rb_shape_get_shape_id(arg->obj);
     struct w_ivar_arg ivarg = {arg, num};
     if (!num) return;
     rb_ivar_foreach(obj, w_obj_each, (st_data_t)&ivarg);

@@ -727,6 +725,10 @@ w_ivar_each(VALUE obj, st_index_t num, struct dump_call_arg *arg)
         rb_raise(rb_eRuntimeError, "instance variable removed from %"PRIsVALUE" instance",
                  CLASS_OF(arg->obj));
     }
+    if (shape_id != rb_shape_get_shape_id(arg->obj)) {
+        rb_raise(rb_eRuntimeError, "instance variable added to %"PRIsVALUE" instance",
+                 CLASS_OF(arg->obj));
+    }
 }
 
 static void

misc/lldb_cruby.py

@@ -418,6 +418,7 @@ def lldb_inspect(debugger, target, result, val):
     elif flType == RUBY_T_IMEMO:
         # I'm not sure how to get IMEMO_MASK out of lldb. It's not in globals()
+        imemo_type = (flags >> RUBY_FL_USHIFT) & 0x0F # IMEMO_MASK
 
         print("T_IMEMO: ", file=result)
         append_command_output(debugger, "p (enum imemo_type) %d" % imemo_type, result)
         append_command_output(debugger, "p *(struct MEMO *) %0#x" % val.GetValueAsUnsigned(), result)

mjit_c.rb (35 changed lines)

@@ -5,6 +5,10 @@ module RubyVM::MJIT
   C = Object.new
 
   class << C
+    def SHAPE_BITS
+      RubyVM::Shape::SHAPE_BITS
+    end
+
     def ROBJECT_EMBED_LEN_MAX
       Primitive.cexpr! 'INT2NUM(RBIMPL_EMBED_LEN_MAX_OF(VALUE))'
     end

@@ -165,6 +169,14 @@ module RubyVM::MJIT
     Primitive.cexpr! %q{ INT2NUM(VM_METHOD_TYPE_ISEQ) }
   end
 
+  def C.INVALID_SHAPE_ID
+    Primitive.cexpr! %q{ ULONG2NUM(INVALID_SHAPE_ID) }
+  end
+
+  def C.SHAPE_MASK
+    Primitive.cexpr! %q{ ULONG2NUM(SHAPE_MASK) }
+  end
+
   def C.CALL_DATA
     @CALL_DATA ||= self.rb_call_data
   end

@@ -181,6 +193,10 @@ module RubyVM::MJIT
     @RB_BUILTIN ||= self.rb_builtin_function
   end
 
+  def C.attr_index_t
+    @attr_index_t ||= CType::Immediate.parse("uint32_t")
+  end
+
   def C.compile_branch
     @compile_branch ||= CType::Struct.new(
       "compile_branch", Primitive.cexpr!("SIZEOF(struct compile_branch)"),

@@ -201,7 +217,6 @@ module RubyVM::MJIT
       compiled_id: [CType::Immediate.parse("int"), Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), compiled_id)")],
       compile_info: [CType::Pointer.new { self.rb_mjit_compile_info }, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), compile_info)")],
       merge_ivar_guards_p: [self._Bool, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), merge_ivar_guards_p)")],
-      ivar_serial: [self.rb_serial_t, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), ivar_serial)")],
       max_ivar_index: [CType::Immediate.parse("size_t"), Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), max_ivar_index)")],
       inlined_iseqs: [CType::Pointer.new { CType::Pointer.new { self.rb_iseq_constant_body } }, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), inlined_iseqs)")],
       inline_context: [self.inlined_call_context, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), inline_context)")],

@@ -240,7 +255,9 @@ module RubyVM::MJIT
   def C.iseq_inline_iv_cache_entry
     @iseq_inline_iv_cache_entry ||= CType::Struct.new(
       "iseq_inline_iv_cache_entry", Primitive.cexpr!("SIZEOF(struct iseq_inline_iv_cache_entry)"),
-      entry: [CType::Pointer.new { self.rb_iv_index_tbl_entry }, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), entry)")],
+      source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), source_shape_id)")],
+      dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), dest_shape_id)")],
+      attr_index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), attr_index)")],
     )
   end
 

@@ -313,7 +330,11 @@ module RubyVM::MJIT
       call_: [self.vm_call_handler, Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), call_)")],
       aux_: [CType::Union.new(
         "", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_)"),
-        attr_index: CType::Immediate.parse("unsigned int"),
+        attr: CType::Struct.new(
+          "", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_.attr)"),
+          index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, index)")],
+          dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, dest_shape_id)")],
+        ),
         method_missing_reason: self.method_missing_reason,
         v: self.VALUE,
       ), Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), aux_)")],

@@ -503,8 +524,8 @@ module RubyVM::MJIT
     @rb_iv_index_tbl_entry ||= CType::Struct.new(
       "rb_iv_index_tbl_entry", Primitive.cexpr!("SIZEOF(struct rb_iv_index_tbl_entry)"),
       index: [CType::Immediate.parse("uint32_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), index)")],
-      class_serial: [self.rb_serial_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), class_serial)")],
-      class_value: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), class_value)")],
+      source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), source_shape_id)")],
+      dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), dest_shape_id)")],
     )
   end
 

@@ -577,6 +598,10 @@ module RubyVM::MJIT
     @VALUE ||= CType::Immediate.find(Primitive.cexpr!("SIZEOF(VALUE)"), Primitive.cexpr!("SIGNED_TYPE_P(VALUE)"))
   end
 
+  def C.shape_id_t
+    @shape_id_t ||= CType::Immediate.find(Primitive.cexpr!("SIZEOF(shape_id_t)"), Primitive.cexpr!("SIGNED_TYPE_P(shape_id_t)"))
+  end
+
   def C._Bool
     CType::Bool.new
   end

@@ -8,6 +8,7 @@
 #include "builtin.h"
 #include "mjit.h"
 #include "mjit_unit.h"
+#include "shape.h"
 
 // Macros to check if a position is already compiled using compile_status.stack_size_for_pos
 #define NOT_COMPILED_STACK_SIZE -1

@@ -48,7 +49,6 @@ struct compile_status {
     // Mutated optimization levels
     struct rb_mjit_compile_info *compile_info;
     bool merge_ivar_guards_p; // If true, merge guards of ivar accesses
-    rb_serial_t ivar_serial; // ic_serial of IVC in is_entries (used only when merge_ivar_guards_p)
     size_t max_ivar_index; // Max IVC index in is_entries (used only when merge_ivar_guards_p)
     // If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.
     const struct rb_iseq_constant_body **inlined_iseqs;

object.c (46 changed lines)

@@ -39,6 +39,7 @@
 #include "ruby/util.h"
 #include "ruby/assert.h"
 #include "builtin.h"
+#include "shape.h"
 
 /*!
  * \addtogroup object

@@ -271,9 +272,33 @@ rb_obj_copy_ivar(VALUE dest, VALUE obj)
     VALUE *src_buf = ROBJECT_IVPTR(obj);
     uint32_t dest_len = ROBJECT_NUMIV(dest);
     uint32_t src_len = ROBJECT_NUMIV(obj);
-    uint32_t len = dest_len < src_len ? dest_len : src_len;
+    uint32_t max_len = dest_len < src_len ? src_len : dest_len;
 
-    MEMCPY(dest_buf, src_buf, VALUE, len);
+    rb_ensure_iv_list_size(dest, dest_len, max_len);
+
+    dest_len = ROBJECT_NUMIV(dest);
+    uint32_t min_len = dest_len > src_len ? src_len : dest_len;
+
+    if (RBASIC(obj)->flags & ROBJECT_EMBED) {
+        src_buf = ROBJECT(obj)->as.ary;
+
+        // embedded -> embedded
+        if (RBASIC(dest)->flags & ROBJECT_EMBED) {
+            dest_buf = ROBJECT(dest)->as.ary;
+        }
+        // embedded -> extended
+        else {
+            dest_buf = ROBJECT(dest)->as.heap.ivptr;
+        }
+    }
+    // extended -> extended
+    else {
+        RUBY_ASSERT(!(RBASIC(dest)->flags & ROBJECT_EMBED));
+        dest_buf = ROBJECT(dest)->as.heap.ivptr;
+        src_buf = ROBJECT(obj)->as.heap.ivptr;
+    }
+
+    MEMCPY(dest_buf, src_buf, VALUE, min_len);
 }
 
 static void

@@ -283,10 +308,23 @@ init_copy(VALUE dest, VALUE obj)
         rb_raise(rb_eTypeError, "[bug] frozen object (%s) allocated", rb_obj_classname(dest));
     }
     RBASIC(dest)->flags &= ~(T_MASK|FL_EXIVAR);
+    // Copies the shape id from obj to dest
     RBASIC(dest)->flags |= RBASIC(obj)->flags & (T_MASK|FL_EXIVAR);
     rb_copy_wb_protected_attribute(dest, obj);
     rb_copy_generic_ivar(dest, obj);
     rb_gc_copy_finalizer(dest, obj);
+
+    rb_shape_t *shape_to_set = rb_shape_get_shape(obj);
+
+    // If the object is frozen, the "dup"'d object will *not* be frozen,
+    // so we need to copy the frozen shape's parent to the new object.
+    if (rb_shape_frozen_shape_p(shape_to_set)) {
+        shape_to_set = shape_to_set->parent;
+    }
+
+    // shape ids are different
+    rb_shape_set_shape(dest, shape_to_set);
+
     if (RB_TYPE_P(obj, T_OBJECT)) {
         rb_obj_copy_ivar(dest, obj);
     }

@@ -392,6 +430,9 @@ mutable_obj_clone(VALUE obj, VALUE kwfreeze)
       case Qnil:
         rb_funcall(clone, id_init_clone, 1, obj);
         RBASIC(clone)->flags |= RBASIC(obj)->flags & FL_FREEZE;
+        if (RB_OBJ_FROZEN(obj)) {
+            rb_shape_transition_shape_frozen(clone);
+        }
         break;
       case Qtrue:
         {

@@ -407,6 +448,7 @@ mutable_obj_clone(VALUE obj, VALUE kwfreeze)
         argv[1] = freeze_true_hash;
         rb_funcallv_kw(clone, id_init_clone, 2, argv, RB_PASS_KEYWORDS);
         RBASIC(clone)->flags |= FL_FREEZE;
+        rb_shape_transition_shape_frozen(clone);
         break;
         }
       case Qfalse:

ractor_core.h

@@ -289,11 +289,13 @@ rb_ractor_id(const rb_ractor_t *r)
 
 #if RACTOR_CHECK_MODE > 0
 uint32_t rb_ractor_current_id(void);
+// If ractor check mode is enabled, shape bits needs to be smaller
+STATIC_ASSERT(shape_bits, SHAPE_BITS == 16);
 
 static inline void
 rb_ractor_setup_belonging_to(VALUE obj, uint32_t rid)
 {
-    VALUE flags = RBASIC(obj)->flags & 0xffffffff; // 4B
+    VALUE flags = RBASIC(obj)->flags & 0xffff0000ffffffff; // 4B
     RBASIC(obj)->flags = flags | ((VALUE)rid << 32);
 }
 

@@ -310,7 +312,7 @@ rb_ractor_belonging(VALUE obj)
         return 0;
     }
     else {
-        return RBASIC(obj)->flags >> 32;
+        return RBASIC(obj)->flags >> 32 & 0xFFFF;
     }
 }
 
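The ractor_core.h masks encode a flags layout worth spelling out: on 64-bit debug builds the top 16 bits of the RBasic flags word hold the shape id (SHAPE_BITS == 16), bits 32-47 hold the ractor id, and the low 32 bits remain ordinary flags, which is why the old 0xffffffff mask would have clobbered the shape id. A self-contained illustration of the arithmetic (plain C, independent of CRuby):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint64_t flags = (0xABCDULL << 48) | 0x12345678ULL;  /* shape id + low flags */
        uint32_t rid   = 7;
        /* replace the ractor id without touching the shape id or low flags */
        flags = (flags & 0xffff0000ffffffffULL) | ((uint64_t)rid << 32);
        assert(((flags >> 32) & 0xFFFF) == 7);            /* ractor id reads back */
        assert((flags >> 48) == 0xABCD);                  /* shape id preserved */
        assert((flags & 0xffffffffULL) == 0x12345678ULL); /* low flags preserved */
        return 0;
    }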

shape.c (new file)

@@ -0,0 +1,523 @@
#include "vm_core.h"
#include "vm_sync.h"
#include "shape.h"
#include "internal/class.h"
#include "internal/symbol.h"
#include "internal/variable.h"
#include <stdbool.h>

/*
 * Shape getters
 */
static rb_shape_t*
rb_shape_get_root_shape(void) {
    return GET_VM()->root_shape;
}

shape_id_t
rb_shape_id(rb_shape_t * shape)
{
    return (shape_id_t)(shape - GET_VM()->shape_list);
}

static rb_shape_t*
rb_shape_get_frozen_root_shape(void) {
    return GET_VM()->frozen_root_shape;
}

bool
rb_shape_root_shape_p(rb_shape_t* shape) {
    return shape == rb_shape_get_root_shape();
}

rb_shape_t*
rb_shape_get_shape_by_id(shape_id_t shape_id)
{
    RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);

    rb_vm_t *vm = GET_VM();
    rb_shape_t *shape = &vm->shape_list[shape_id];
    return shape;
}

rb_shape_t*
rb_shape_get_shape_by_id_without_assertion(shape_id_t shape_id)
{
    RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);

    rb_vm_t *vm = GET_VM();
    rb_shape_t *shape = &vm->shape_list[shape_id];
    return shape;
}

#if !SHAPE_IN_BASIC_FLAGS
static inline shape_id_t
RCLASS_SHAPE_ID(VALUE obj)
{
    return RCLASS_EXT(obj)->shape_id;
}

shape_id_t rb_generic_shape_id(VALUE obj);
#endif

shape_id_t
rb_shape_get_shape_id(VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        return FROZEN_ROOT_SHAPE_ID;
    }

#if SHAPE_IN_BASIC_FLAGS
    return RBASIC_SHAPE_ID(obj);
#else
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        return ROBJECT_SHAPE_ID(obj);
        break;
      case T_CLASS:
      case T_MODULE:
        return RCLASS_SHAPE_ID(obj);
      default:
        return rb_generic_shape_id(obj);
    }
#endif
}

rb_shape_t*
rb_shape_get_shape(VALUE obj)
{
    return rb_shape_get_shape_by_id(rb_shape_get_shape_id(obj));
}

static rb_shape_t *
rb_shape_lookup_id(rb_shape_t* shape, ID id, enum shape_type shape_type) {
    while (shape->parent) {
        if (shape->edge_name == id) {
            // If the shape type is different, we don't
            // want this to count as a "found" ID
            if (shape_type == (enum shape_type)shape->type) {
                return shape;
            }
            else {
                return NULL;
            }
        }
        shape = shape->parent;
    }
    return NULL;
}

static rb_shape_t*
get_next_shape_internal(rb_shape_t* shape, ID id, VALUE obj, enum shape_type shape_type)
{
    rb_shape_t *res = NULL;
    RUBY_ASSERT(SHAPE_FROZEN != (enum shape_type)shape->type);
    RB_VM_LOCK_ENTER();
    {
        if (rb_shape_lookup_id(shape, id, shape_type)) {
            // If shape already contains the ivar that is being set, we'll return shape
            res = shape;
        }
        else {
            if (!shape->edges) {
                shape->edges = rb_id_table_create(0);
            }

            // Lookup the shape in edges - if there's already an edge and a corresponding shape for it,
            // we can return that. Otherwise, we'll need to get a new shape
            if (!rb_id_table_lookup(shape->edges, id, (VALUE *)&res)) {
                // In this case, the shape exists, but the shape is garbage, so we need to recreate it
                if (res) {
                    rb_id_table_delete(shape->edges, id);
                    res->parent = NULL;
                }

                rb_shape_t * new_shape = rb_shape_alloc(id, shape);

                new_shape->type = (uint8_t)shape_type;

                switch(shape_type) {
                  case SHAPE_IVAR:
                    new_shape->iv_count = new_shape->parent->iv_count + 1;

                    // Check if we should update max_iv_count on the object's class
                    if (BUILTIN_TYPE(obj) == T_OBJECT) {
                        VALUE klass = rb_obj_class(obj);
                        if (new_shape->iv_count > RCLASS_EXT(klass)->max_iv_count) {
                            RCLASS_EXT(klass)->max_iv_count = new_shape->iv_count;
                        }
                    }
                    break;
                  case SHAPE_IVAR_UNDEF:
                  case SHAPE_FROZEN:
                    new_shape->iv_count = new_shape->parent->iv_count;
                    break;
                  case SHAPE_ROOT:
                    rb_bug("Unreachable");
                    break;
                }

                rb_id_table_insert(shape->edges, id, (VALUE)new_shape);

                res = new_shape;
            }
        }
    }
    RB_VM_LOCK_LEAVE();
    return res;
}

MJIT_FUNC_EXPORTED int
rb_shape_frozen_shape_p(rb_shape_t* shape)
{
    return SHAPE_FROZEN == (enum shape_type)shape->type;
}

void
rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape)
{
    rb_shape_t* next_shape = get_next_shape_internal(shape, id, obj, SHAPE_IVAR_UNDEF);

    if (shape == next_shape) {
        return;
    }

    rb_shape_set_shape(obj, next_shape);
}

void
rb_shape_transition_shape_frozen(VALUE obj)
{
    rb_shape_t* shape = rb_shape_get_shape(obj);
    RUBY_ASSERT(shape);
    RUBY_ASSERT(RB_OBJ_FROZEN(obj));

    if (rb_shape_frozen_shape_p(shape)) {
        return;
    }

    rb_shape_t* next_shape;

    if (shape == rb_shape_get_root_shape()) {
        switch(BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            return;
        }
        next_shape = rb_shape_get_frozen_root_shape();
    }
    else {
        static ID id_frozen;
        if (!id_frozen) {
            id_frozen = rb_make_internal_id();
        }

        next_shape = get_next_shape_internal(shape, (ID)id_frozen, obj, SHAPE_FROZEN);
    }

    RUBY_ASSERT(next_shape);
    rb_shape_set_shape(obj, next_shape);
}

void
rb_shape_transition_shape(VALUE obj, ID id, rb_shape_t *shape)
{
    rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
    if (shape == next_shape) {
        return;
    }
    rb_shape_set_shape(obj, next_shape);
}

rb_shape_t*
rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id)
{
    return get_next_shape_internal(shape, id, obj, SHAPE_IVAR);
}

bool
rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value) {
    while (shape->parent) {
        if (shape->edge_name == id) {
            enum shape_type shape_type;
            shape_type = (enum shape_type)shape->type;

            switch(shape_type) {
              case SHAPE_IVAR:
                RUBY_ASSERT(shape->iv_count > 0);
                *value = shape->iv_count - 1;
                return true;
              case SHAPE_IVAR_UNDEF:
              case SHAPE_ROOT:
                return false;
              case SHAPE_FROZEN:
                rb_bug("Ivar should not exist on frozen transition\n");
            }
        }
        shape = shape->parent;
    }
    return false;
}

static rb_shape_t *
shape_alloc(void)
{
    rb_vm_t *vm = GET_VM();
    shape_id_t shape_id = vm->next_shape_id;
    vm->next_shape_id++;

    if (shape_id == MAX_SHAPE_ID) {
        // TODO: Make an OutOfShapesError ??
        rb_bug("Out of shapes\n");
    }

    return &GET_VM()->shape_list[shape_id];
}

rb_shape_t *
rb_shape_alloc(ID edge_name, rb_shape_t * parent)
{
    rb_shape_t * shape = shape_alloc();

    shape->edge_name = edge_name;
    shape->iv_count = 0;
    shape->parent = parent;

    return shape;
}

MJIT_FUNC_EXPORTED void
rb_shape_set_shape(VALUE obj, rb_shape_t* shape)
{
    rb_shape_set_shape_id(obj, rb_shape_id(shape));
}

VALUE rb_cShape;

/*
 * Exposing Shape to Ruby via RubyVM.debug_shape
 */
static const rb_data_type_t shape_data_type = {
    "Shape",
    {NULL, NULL, NULL,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static VALUE
rb_wrapped_shape_id(VALUE self) {
    rb_shape_t * shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
    return INT2NUM(rb_shape_id(shape));
}

static VALUE
rb_shape_type(VALUE self) {
    rb_shape_t * shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
    return INT2NUM(shape->type);
}

static VALUE
rb_shape_parent_id(VALUE self)
{
    rb_shape_t * shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
    if (shape->parent) {
        return INT2NUM(rb_shape_id(shape->parent));
    }
    else {
        return Qnil;
    }
}

static VALUE parse_key(ID key) {
    if ((key & RUBY_ID_INTERNAL) == RUBY_ID_INTERNAL) {
        return LONG2NUM(key);
    } else {
        return ID2SYM(key);
    }
}

static VALUE
rb_shape_t_to_rb_cShape(rb_shape_t *shape) {
    union { const rb_shape_t *in; void *out; } deconst;
    VALUE res;
    deconst.in = shape;
    res = TypedData_Wrap_Struct(rb_cShape, &shape_data_type, deconst.out);

    return res;
}

static enum rb_id_table_iterator_result rb_edges_to_hash(ID key, VALUE value, void *ref)
{
    rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_shape_t_to_rb_cShape((rb_shape_t*)value));
    return ID_TABLE_CONTINUE;
}

static VALUE
rb_shape_edges(VALUE self)
{
    rb_shape_t* shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);

    VALUE hash = rb_hash_new();

    if (shape->edges) {
        rb_id_table_foreach(shape->edges, rb_edges_to_hash, &hash);
    }

    return hash;
}

static VALUE
rb_shape_edge_name(VALUE self)
{
    rb_shape_t* shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);

    if (shape->edge_name) {
        return ID2SYM(shape->edge_name);
    }
    else {
        return Qnil;
    }
}

static VALUE
rb_shape_iv_count(VALUE self)
{
    rb_shape_t* shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);

    return INT2NUM(shape->iv_count);
}

static VALUE
rb_shape_export_depth(VALUE self)
{
    rb_shape_t* shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);

    unsigned int depth = 0;
    while (shape->parent) {
        depth++;
        shape = shape->parent;
    }
    return INT2NUM(depth);
}

static VALUE
rb_shape_parent(VALUE self)
{
    rb_shape_t * shape;
    TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
    if (shape->parent) {
        return rb_shape_t_to_rb_cShape(shape->parent);
    }
    else {
        return Qnil;
    }
}

VALUE rb_shape_debug_shape(VALUE self, VALUE obj) {
    return rb_shape_t_to_rb_cShape(rb_shape_get_shape(obj));
}

VALUE rb_shape_debug_root_shape(VALUE self) {
    return rb_shape_t_to_rb_cShape(rb_shape_get_root_shape());
}

VALUE rb_shape_debug_frozen_root_shape(VALUE self) {
    return rb_shape_t_to_rb_cShape(rb_shape_get_frozen_root_shape());
}

VALUE rb_obj_shape(rb_shape_t* shape);

static enum rb_id_table_iterator_result collect_keys_and_values(ID key, VALUE value, void *ref)
{
    rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_obj_shape((rb_shape_t*)value));
    return ID_TABLE_CONTINUE;
}

static VALUE edges(struct rb_id_table* edges)
{
    VALUE hash = rb_hash_new();
    if (edges)
        rb_id_table_foreach(edges, collect_keys_and_values, &hash);
    return hash;
}

VALUE rb_obj_shape(rb_shape_t* shape) {
    VALUE rb_shape = rb_hash_new();

    rb_hash_aset(rb_shape, ID2SYM(rb_intern("id")), INT2NUM(rb_shape_id(shape)));
    rb_hash_aset(rb_shape, ID2SYM(rb_intern("edges")), edges(shape->edges));

    if (shape == rb_shape_get_root_shape()) {
        rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(ROOT_SHAPE_ID));
    }
    else {
        rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(rb_shape_id(shape->parent)));
    }

    rb_hash_aset(rb_shape, ID2SYM(rb_intern("edge_name")), rb_id2str(shape->edge_name));
    return rb_shape;
}

static VALUE shape_transition_tree(VALUE self) {
    return rb_obj_shape(rb_shape_get_root_shape());
}

static VALUE shape_count(VALUE self) {
    int shape_count = 0;
    rb_vm_t *vm = GET_VM();
    for(shape_id_t i = 0; i < vm->next_shape_id; i++) {
        if(rb_shape_get_shape_by_id_without_assertion(i)) {
            shape_count++;
        }
    }
    return INT2NUM(shape_count);
}

static VALUE
shape_max_shape_count(VALUE self)
{
    return INT2NUM(GET_VM()->next_shape_id);
}

VALUE
rb_shape_flags_mask(void)
{
    return SHAPE_FLAG_MASK;
}

void
Init_shape(void)
{
    rb_cShape = rb_define_class_under(rb_cRubyVM, "Shape", rb_cObject);
    rb_undef_alloc_func(rb_cShape);

    rb_define_method(rb_cShape, "parent_id", rb_shape_parent_id, 0);
    rb_define_method(rb_cShape, "parent", rb_shape_parent, 0);
    rb_define_method(rb_cShape, "edges", rb_shape_edges, 0);
    rb_define_method(rb_cShape, "edge_name", rb_shape_edge_name, 0);
    rb_define_method(rb_cShape, "iv_count", rb_shape_iv_count, 0);
    rb_define_method(rb_cShape, "depth", rb_shape_export_depth, 0);
    rb_define_method(rb_cShape, "id", rb_wrapped_shape_id, 0);
    rb_define_method(rb_cShape, "type", rb_shape_type, 0);
    rb_define_const(rb_cShape, "SHAPE_ROOT", INT2NUM(SHAPE_ROOT));
    rb_define_const(rb_cShape, "SHAPE_IVAR", INT2NUM(SHAPE_IVAR));
    rb_define_const(rb_cShape, "SHAPE_IVAR_UNDEF", INT2NUM(SHAPE_IVAR_UNDEF));
    rb_define_const(rb_cShape, "SHAPE_FROZEN", INT2NUM(SHAPE_FROZEN));
    rb_define_const(rb_cShape, "SHAPE_BITS", INT2NUM(SHAPE_BITS));

    rb_define_module_function(rb_cRubyVM, "debug_shape_transition_tree", shape_transition_tree, 0);
    rb_define_module_function(rb_cRubyVM, "debug_shape_count", shape_count, 0);
    rb_define_singleton_method(rb_cRubyVM, "debug_shape", rb_shape_debug_shape, 1);
    rb_define_singleton_method(rb_cRubyVM, "debug_max_shape_count", shape_max_shape_count, 0);
    rb_define_singleton_method(rb_cRubyVM, "debug_root_shape", rb_shape_debug_root_shape, 0);
    rb_define_singleton_method(rb_cRubyVM, "debug_frozen_root_shape", rb_shape_debug_frozen_root_shape, 0);
}
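Two details of shape.c deserve a note. Shapes live in one VM-wide array (shape_list), so rb_shape_id is plain pointer subtraction and the id-to-shape direction is a constant-time index; that is what lets a shape id fit into the RBasic flags bits. And get_next_shape_internal takes the VM lock, so the transition tree can be grown safely from multiple ractors. A short sketch of the id round trip, using the internal names from the file above:

    rb_shape_t *shape = rb_shape_get_shape(obj);        /* object -> shape */
    shape_id_t  id    = rb_shape_id(shape);             /* shape - shape_list */
    RUBY_ASSERT(rb_shape_get_shape_by_id(id) == shape); /* &shape_list[id], O(1) */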
@ -0,0 +1,150 @@
|
|||
#ifndef RUBY_SHAPE_H
#define RUBY_SHAPE_H

#if (SIZEOF_UINT64_T == SIZEOF_VALUE)
#define SIZEOF_SHAPE_T 4
#define SHAPE_IN_BASIC_FLAGS 1
typedef uint32_t attr_index_t;
#else
#define SIZEOF_SHAPE_T 2
#define SHAPE_IN_BASIC_FLAGS 0
typedef uint16_t attr_index_t;
#endif

#define MAX_IVARS (attr_index_t)(-1)

#if RUBY_DEBUG || (defined(VM_CHECK_MODE) && VM_CHECK_MODE > 0)
# if SIZEOF_SHAPE_T == 4
typedef uint32_t shape_id_t;
# define SHAPE_BITS 16
# else
typedef uint16_t shape_id_t;
# define SHAPE_BITS 16
# endif
#else
# if SIZEOF_SHAPE_T == 4
typedef uint32_t shape_id_t;
# define SHAPE_BITS 32
# else
typedef uint16_t shape_id_t;
# define SHAPE_BITS 16
# endif
#endif

# define SHAPE_MASK (((uintptr_t)1 << SHAPE_BITS) - 1)
# define SHAPE_FLAG_MASK (((VALUE)-1) >> SHAPE_BITS)

# define SHAPE_FLAG_SHIFT ((SIZEOF_VALUE * 8) - SHAPE_BITS)

# define SHAPE_BITMAP_SIZE 16384

# define MAX_SHAPE_ID (SHAPE_MASK - 1)
# define INVALID_SHAPE_ID SHAPE_MASK
# define ROOT_SHAPE_ID 0x0
# define FROZEN_ROOT_SHAPE_ID 0x1

struct rb_shape {
    struct rb_shape * parent;   // Pointer to the parent
    struct rb_id_table * edges; // id_table from ID (ivar) to next shape
    ID edge_name;               // ID (ivar) for transition from parent to rb_shape
    attr_index_t iv_count;
    uint8_t type;
};

typedef struct rb_shape rb_shape_t;

enum shape_type {
    SHAPE_ROOT,
    SHAPE_IVAR,
    SHAPE_FROZEN,
    SHAPE_IVAR_UNDEF,
};

static inline shape_id_t
IMEMO_CACHED_SHAPE_ID(VALUE cc)
{
    RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
    return (shape_id_t)(SHAPE_MASK & (RBASIC(cc)->flags >> SHAPE_FLAG_SHIFT));
}

static inline void
IMEMO_SET_CACHED_SHAPE_ID(VALUE cc, shape_id_t shape_id)
{
    RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
    RBASIC(cc)->flags &= SHAPE_FLAG_MASK;
    RBASIC(cc)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}

#if SHAPE_IN_BASIC_FLAGS
static inline shape_id_t
RBASIC_SHAPE_ID(VALUE obj)
{
    RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
    return (shape_id_t)(SHAPE_MASK & ((RBASIC(obj)->flags) >> SHAPE_FLAG_SHIFT));
}

static inline void
RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
    // Ractors are occupying the upper 32 bits of flags, but only in debug mode
    // Object shapes are occupying top bits
    RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
    RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}

static inline shape_id_t
ROBJECT_SHAPE_ID(VALUE obj)
{
    RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
    return RBASIC_SHAPE_ID(obj);
}

static inline void
ROBJECT_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
    RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
    RBASIC_SET_SHAPE_ID(obj, shape_id);
}

#else

static inline shape_id_t
ROBJECT_SHAPE_ID(VALUE obj)
{
    RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
    return (shape_id_t)(SHAPE_MASK & (RBASIC(obj)->flags >> SHAPE_FLAG_SHIFT));
}

static inline void
ROBJECT_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
    RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
    RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
#endif

bool rb_shape_root_shape_p(rb_shape_t* shape);

rb_shape_t* rb_shape_get_shape_by_id_without_assertion(shape_id_t shape_id);

MJIT_SYMBOL_EXPORT_BEGIN
rb_shape_t* rb_shape_get_shape_by_id(shape_id_t shape_id);
void rb_shape_set_shape(VALUE obj, rb_shape_t* shape);
shape_id_t rb_shape_get_shape_id(VALUE obj);
rb_shape_t* rb_shape_get_shape(VALUE obj);
int rb_shape_frozen_shape_p(rb_shape_t* shape);
void rb_shape_transition_shape_frozen(VALUE obj);
void rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape);
void rb_shape_transition_shape(VALUE obj, ID id, rb_shape_t *shape);
rb_shape_t* rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id);
bool rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t * value);
shape_id_t rb_shape_id(rb_shape_t * shape);
MJIT_SYMBOL_EXPORT_END

rb_shape_t * rb_shape_alloc(ID edge_name, rb_shape_t * parent);

bool rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id);

VALUE rb_obj_debug_shape(VALUE self, VALUE obj);
VALUE rb_shape_flags_mask(void);

#endif
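An aside on how this header gets used (a minimal illustrative sketch, not code from this commit; the real lookup lives in shape.c, which this excerpt does not show): rb_shape_get_iv_index, declared above, can be pictured as a walk up the parent links, matching edge_name and recovering the slot from iv_count.

    // Sketch only, assuming the struct rb_shape layout declared above.
    static bool
    example_shape_iv_index(rb_shape_t *shape, ID id, attr_index_t *index)
    {
        while (shape) {
            // Each SHAPE_IVAR node owns slot iv_count - 1 for the ivar
            // named by its edge_name.
            if (shape->type == SHAPE_IVAR && shape->edge_name == id) {
                *index = shape->iv_count - 1;
                return true;
            }
            shape = shape->parent;
        }
        return false;
    }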
@ -7,6 +7,7 @@ module Bug end
module Bug::Marshal
  class TestInternalIVar < Test::Unit::TestCase
    def test_marshal
      pend "We don't support IVs with ID of 0"
      v = InternalIVar.new("hello", "world", "bye")
      assert_equal("hello", v.normal)
      assert_equal("world", v.internal)
@ -831,7 +831,7 @@ class TestMJIT < Test::Unit::TestCase
  end

  def test_inlined_exivar
    assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "aaa", success_count: 3, recompile_count: 1, min_calls: 2)
    assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "aaa", success_count: 4, recompile_count: 2, min_calls: 2)
    begin;
      class Foo < Hash
        def initialize

@ -850,7 +850,7 @@ class TestMJIT < Test::Unit::TestCase
  end

  def test_inlined_undefined_ivar
    assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "bbb", success_count: 3, min_calls: 3)
    assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "bbb", success_count: 2, min_calls: 2)
    begin;
      class Foo
        def initialize
@ -993,4 +993,13 @@ class TestObject < Test::Unit::TestCase
      end
    EOS
  end

  def test_frozen_inspect
    obj = Object.new
    obj.instance_variable_set(:@a, "a")
    ins = obj.inspect
    obj.freeze

    assert_equal(ins, obj.inspect)
  end
end
@ -0,0 +1,173 @@
# frozen_string_literal: false
require 'test/unit'

# These test the functionality of object shapes
class TestShapes < Test::Unit::TestCase
  class Example
    def initialize
      @a = 1
    end
  end

  class RemoveAndAdd
    def add_foo
      @foo = 1
    end

    def remove
      remove_instance_variable(:@foo)
    end

    def add_bar
      @bar = 1
    end
  end

  # RubyVM.debug_shape returns new instances of shape objects for
  # each call. This helper method allows us to define equality for
  # shapes
  def assert_shape_equal(shape1, shape2)
    assert_equal(shape1.id, shape2.id)
    assert_equal(shape1.parent_id, shape2.parent_id)
    assert_equal(shape1.depth, shape2.depth)
    assert_equal(shape1.type, shape2.type)
  end

  def refute_shape_equal(shape1, shape2)
    refute_equal(shape1.id, shape2.id)
  end

  def test_iv_index
    example = RemoveAndAdd.new
    shape = RubyVM.debug_shape(example)
    assert_equal 0, shape.iv_count

    example.add_foo # makes a transition
    new_shape = RubyVM.debug_shape(example)
    assert_equal([:@foo], example.instance_variables)
    assert_equal(shape.id, new_shape.parent.id)
    assert_equal(1, new_shape.iv_count)

    example.remove # makes a transition
    remove_shape = RubyVM.debug_shape(example)
    assert_equal([], example.instance_variables)
    assert_equal(new_shape.id, remove_shape.parent.id)
    assert_equal(1, remove_shape.iv_count)

    example.add_bar # makes a transition
    bar_shape = RubyVM.debug_shape(example)
    assert_equal([:@bar], example.instance_variables)
    assert_equal(remove_shape.id, bar_shape.parent.id)
    assert_equal(2, bar_shape.iv_count)
  end

  def test_new_obj_has_root_shape
    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(Object.new))
  end

  def test_frozen_new_obj_has_frozen_root_shape
    assert_shape_equal(
      RubyVM.debug_frozen_root_shape,
      RubyVM.debug_shape(Object.new.freeze)
    )
  end

  def test_str_has_root_shape
    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(""))
  end

  def test_array_has_root_shape
    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape([]))
  end

  def test_hash_has_root_shape
    assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape({}))
  end

  def test_true_has_frozen_root_shape
    assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(true))
  end

  def test_nil_has_frozen_root_shape
    assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(nil))
  end

  def test_basic_shape_transition
    obj = Example.new
    refute_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(obj))
    assert_shape_equal(RubyVM.debug_root_shape.edges[:@a], RubyVM.debug_shape(obj))
    assert_equal(obj.instance_variable_get(:@a), 1)
  end

  def test_different_objects_make_same_transition
    obj = Example.new
    obj2 = ""
    obj2.instance_variable_set(:@a, 1)
    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
  end

  def test_duplicating_objects
    obj = Example.new
    obj2 = obj.dup
    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
  end

  def test_freezing_and_duplicating_object
    obj = Object.new.freeze
    obj2 = obj.dup
    refute_predicate(obj2, :frozen?)
    # dup'd objects shouldn't be frozen, and the shape should be the
    # parent shape of the copied object
    assert_equal(RubyVM.debug_shape(obj).parent.id, RubyVM.debug_shape(obj2).id)
  end

  def test_freezing_and_duplicating_object_with_ivars
    obj = Example.new.freeze
    obj2 = obj.dup
    refute_predicate(obj2, :frozen?)
    refute_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
    assert_equal(obj2.instance_variable_get(:@a), 1)
  end

  def test_freezing_and_duplicating_string_with_ivars
    str = "str"
    str.instance_variable_set(:@a, 1)
    str.freeze
    str2 = str.dup
    refute_predicate(str2, :frozen?)
    refute_equal(RubyVM.debug_shape(str).id, RubyVM.debug_shape(str2).id)
    assert_equal(str2.instance_variable_get(:@a), 1)
  end

  def test_freezing_and_cloning_objects
    obj = Object.new.freeze
    obj2 = obj.clone(freeze: true)
    assert_predicate(obj2, :frozen?)
    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
  end

  def test_freezing_and_cloning_object_with_ivars
    obj = Example.new.freeze
    obj2 = obj.clone(freeze: true)
    assert_predicate(obj2, :frozen?)
    assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
    assert_equal(obj2.instance_variable_get(:@a), 1)
  end

  def test_freezing_and_cloning_string
    str = "str".freeze
    str2 = str.clone(freeze: true)
    assert_predicate(str2, :frozen?)
    assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
  end

  def test_freezing_and_cloning_string_with_ivars
    str = "str"
    str.instance_variable_set(:@a, 1)
    str.freeze
    str2 = str.clone(freeze: true)
    assert_predicate(str2, :frozen?)
    assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
    assert_equal(str2.instance_variable_get(:@a), 1)
  end
end
@ -341,12 +341,17 @@ generator = BindingGenerator.new(
      VM_METHOD_TYPE_CFUNC
      VM_METHOD_TYPE_ISEQ
    ],
    ULONG: %w[
      INVALID_SHAPE_ID
      SHAPE_MASK
    ],
  },
  types: %w[
    CALL_DATA
    IC
    IVC
    RB_BUILTIN
    attr_index_t
    compile_branch
    compile_status
    inlined_call_context

@ -360,10 +365,10 @@ generator = BindingGenerator.new(
    rb_callable_method_entry_struct
    rb_callcache
    rb_callinfo
    rb_cref_t
    rb_control_frame_t
    rb_execution_context_t
    rb_cref_t
    rb_execution_context_struct
    rb_execution_context_t
    rb_iseq_constant_body
    rb_iseq_location_t
    rb_iseq_struct

@ -378,6 +383,7 @@ generator = BindingGenerator.new(
  ],
  dynamic_types: %w[
    VALUE
    shape_id_t
  ],
  skip_fields: {
    'rb_execution_context_struct.machine': %w[regs], # differs between macOS and Linux
735
variable.c
@ -34,6 +34,7 @@
#include "ruby/st.h"
#include "ruby/util.h"
#include "transient_heap.h"
#include "shape.h"
#include "variable.h"
#include "vm_core.h"
#include "ractor_core.h"

@ -63,12 +64,9 @@ static VALUE rb_const_search(VALUE klass, ID id, int exclude, int recurse, int v
static st_table *generic_iv_tbl_;

struct ivar_update {
    union {
        st_table *iv_index_tbl;
        struct gen_ivtbl *ivtbl;
    } u;
    st_data_t index;
    int iv_extended;
    uint32_t iv_index;
    rb_shape_t* shape;
};

void

@ -896,30 +894,6 @@ rb_alias_variable(ID name1, ID name2)
    entry1->var = entry2->var;
}

static bool
iv_index_tbl_lookup(struct st_table *tbl, ID id, uint32_t *indexp)
{
    st_data_t ent_data;
    int r;

    if (tbl == NULL) return false;

    RB_VM_LOCK_ENTER();
    {
        r = st_lookup(tbl, (st_data_t)id, &ent_data);
    }
    RB_VM_LOCK_LEAVE();

    if (r) {
        struct rb_iv_index_tbl_entry *ent = (void *)ent_data;
        *indexp = ent->index;
        return true;
    }
    else {
        return false;
    }
}

static void
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(ID id)
{

@ -957,7 +931,20 @@ generic_ivtbl_no_ractor_check(VALUE obj)
}

static int
gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
gen_ivtbl_get_unlocked(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
{
    st_data_t data;

    if (st_lookup(generic_ivtbl(obj, id, false), (st_data_t)obj, &data)) {
        *ivtbl = (struct gen_ivtbl *)data;
        return 1;
    }

    return 0;
}

MJIT_FUNC_EXPORTED int
rb_gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
{
    st_data_t data;
    int r = 0;

@ -977,63 +964,7 @@ gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
MJIT_FUNC_EXPORTED int
rb_ivar_generic_ivtbl_lookup(VALUE obj, struct gen_ivtbl **ivtbl)
{
    return gen_ivtbl_get(obj, 0, ivtbl);
}

MJIT_FUNC_EXPORTED VALUE
rb_ivar_generic_lookup_with_index(VALUE obj, ID id, uint32_t index)
{
    struct gen_ivtbl *ivtbl;

    if (gen_ivtbl_get(obj, id, &ivtbl)) {
        if (LIKELY(index < ivtbl->numiv)) {
            VALUE val = ivtbl->ivptr[index];
            return val;
        }
    }

    return Qundef;
}

static VALUE
generic_ivar_delete(VALUE obj, ID id, VALUE undef)
{
    struct gen_ivtbl *ivtbl;

    if (gen_ivtbl_get(obj, id, &ivtbl)) {
        st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
        uint32_t index;

        if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &index)) {
            if (index < ivtbl->numiv) {
                VALUE ret = ivtbl->ivptr[index];

                ivtbl->ivptr[index] = Qundef;
                return ret == Qundef ? undef : ret;
            }
        }
    }
    return undef;
}

static VALUE
generic_ivar_get(VALUE obj, ID id, VALUE undef)
{
    struct gen_ivtbl *ivtbl;

    if (gen_ivtbl_get(obj, id, &ivtbl)) {
        st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
        uint32_t index;

        if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &index)) {
            if (index < ivtbl->numiv) {
                VALUE ret = ivtbl->ivptr[index];

                return ret == Qundef ? undef : ret;
            }
        }
    }
    return undef;
    return rb_gen_ivtbl_get(obj, 0, ivtbl);
}

static size_t

@ -1045,6 +976,8 @@ gen_ivtbl_bytes(size_t n)
static struct gen_ivtbl *
gen_ivtbl_resize(struct gen_ivtbl *old, uint32_t n)
{
    RUBY_ASSERT(n > 0);

    uint32_t len = old ? old->numiv : 0;
    struct gen_ivtbl *ivtbl = xrealloc(old, gen_ivtbl_bytes(n));

@ -1069,18 +1002,6 @@ gen_ivtbl_dup(const struct gen_ivtbl *orig)
}
#endif

static uint32_t
iv_index_tbl_newsize(struct ivar_update *ivup)
{
    if (!ivup->iv_extended) {
        return (uint32_t)ivup->u.iv_index_tbl->num_entries;
    }
    else {
        uint32_t index = (uint32_t)ivup->index; /* should not overflow */
        return (index+1) + (index+1)/4; /* (index+1)*1.25 */
    }
}

static int
generic_ivar_update(st_data_t *k, st_data_t *v, st_data_t u, int existing)
{

@ -1091,53 +1012,22 @@ generic_ivar_update(st_data_t *k, st_data_t *v, st_data_t u, int existing)

    if (existing) {
        ivtbl = (struct gen_ivtbl *)*v;
        if (ivup->index < ivtbl->numiv) {
            ivup->u.ivtbl = ivtbl;
        if (ivup->iv_index < ivtbl->numiv) {
            ivup->ivtbl = ivtbl;
            return ST_STOP;
        }
    }
    FL_SET((VALUE)*k, FL_EXIVAR);
    uint32_t newsize = iv_index_tbl_newsize(ivup);
    ivtbl = gen_ivtbl_resize(ivtbl, newsize);
    ivtbl = gen_ivtbl_resize(ivtbl, ivup->shape->iv_count);
    // Reinsert in to the hash table because ivtbl might be a newly resized chunk of memory
    *v = (st_data_t)ivtbl;
    ivup->u.ivtbl = ivtbl;
    ivup->ivtbl = ivtbl;
#if !SHAPE_IN_BASIC_FLAGS
    ivtbl->shape_id = rb_shape_id(ivup->shape);
#endif
    return ST_CONTINUE;
}

static VALUE
generic_ivar_defined(VALUE obj, ID id)
{
    struct gen_ivtbl *ivtbl;
    st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
    uint32_t index;

    if (!iv_index_tbl_lookup(iv_index_tbl, id, &index)) return Qfalse;
    if (!gen_ivtbl_get(obj, id, &ivtbl)) return Qfalse;

    return RBOOL((index < ivtbl->numiv) && (ivtbl->ivptr[index] != Qundef));
}

static int
generic_ivar_remove(VALUE obj, ID id, VALUE *valp)
{
    struct gen_ivtbl *ivtbl;
    uint32_t index;
    st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));

    if (!iv_index_tbl) return 0;
    if (!iv_index_tbl_lookup(iv_index_tbl, id, &index)) return 0;
    if (!gen_ivtbl_get(obj, id, &ivtbl)) return 0;

    if (index < ivtbl->numiv) {
        if (ivtbl->ivptr[index] != Qundef) {
            *valp = ivtbl->ivptr[index];
            ivtbl->ivptr[index] = Qundef;
            return 1;
        }
    }
    return 0;
}

static void
gen_ivtbl_mark(const struct gen_ivtbl *ivtbl)
{

@ -1153,7 +1043,7 @@ rb_mark_generic_ivar(VALUE obj)
{
    struct gen_ivtbl *ivtbl;

    if (gen_ivtbl_get(obj, 0, &ivtbl)) {
    if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
        gen_ivtbl_mark(ivtbl);
    }
}

@ -1182,11 +1072,35 @@ rb_generic_ivar_memsize(VALUE obj)
{
    struct gen_ivtbl *ivtbl;

    if (gen_ivtbl_get(obj, 0, &ivtbl))
    if (rb_gen_ivtbl_get(obj, 0, &ivtbl))
        return gen_ivtbl_bytes(ivtbl->numiv);
    return 0;
}

#if !SHAPE_IN_BASIC_FLAGS
MJIT_FUNC_EXPORTED shape_id_t
rb_generic_shape_id(VALUE obj)
{
    struct gen_ivtbl *ivtbl = 0;
    shape_id_t shape_id = 0;

    RB_VM_LOCK_ENTER();
    {
        st_table* global_iv_table = generic_ivtbl(obj, 0, false);

        if (global_iv_table && st_lookup(global_iv_table, obj, (st_data_t *)&ivtbl)) {
            shape_id = ivtbl->shape_id;
        }
        else if (OBJ_FROZEN(obj)) {
            shape_id = FROZEN_ROOT_SHAPE_ID;
        }
    }
    RB_VM_LOCK_LEAVE();

    return shape_id;
}
#endif

static size_t
gen_ivtbl_count(const struct gen_ivtbl *ivtbl)
{

@ -1254,23 +1168,16 @@ VALUE
rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
{
    if (SPECIAL_CONST_P(obj)) return undef;
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            uint32_t index;
            uint32_t len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            VALUE val;

            if (iv_index_tbl_lookup(ROBJECT_IV_INDEX_TBL(obj), id, &index) &&
                index < len &&
                (val = ptr[index]) != Qundef) {
                return val;
            }
            else {
                break;
            }
        }
    shape_id_t shape_id;
    VALUE * ivar_list;
    rb_shape_t * shape;

#if SHAPE_IN_BASIC_FLAGS
    shape_id = RBASIC_SHAPE_ID(obj);
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        {

@ -1287,14 +1194,37 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
                return val;
            }
            else {
                break;
                return undef;
            }
        }
      case T_OBJECT:
        {
#if !SHAPE_IN_BASIC_FLAGS
            shape_id = ROBJECT_SHAPE_ID(obj);
#endif
            ivar_list = ROBJECT_IVPTR(obj);
            break;
        }
      default:
        if (FL_TEST(obj, FL_EXIVAR))
            return generic_ivar_get(obj, id, undef);
        if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
#if !SHAPE_IN_BASIC_FLAGS
            shape_id = ivtbl->shape_id;
#endif
            ivar_list = ivtbl->ivptr;
        } else {
            return undef;
        }
        break;
    }

    attr_index_t index = 0;
    shape = rb_shape_get_shape_by_id(shape_id);
    if (rb_shape_get_iv_index(shape, id, &index)) {
        return ivar_list[index];
    }

    return undef;
}

@ -1315,26 +1245,12 @@ rb_attr_get(VALUE obj, ID id)
static VALUE
rb_ivar_delete(VALUE obj, ID id, VALUE undef)
{
    VALUE *ptr;
    struct st_table *iv_index_tbl;
    uint32_t len, index;

    rb_check_frozen(obj);
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        len = ROBJECT_NUMIV(obj);
        ptr = ROBJECT_IVPTR(obj);
        iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
        if (iv_index_tbl_lookup(iv_index_tbl, id, &index) &&
            index < len) {
            VALUE val = ptr[index];
            ptr[index] = Qundef;

            if (val != Qundef) {
                return val;
            }
        }
        break;
    VALUE val = Qnil;
    attr_index_t index;

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);

@ -1345,11 +1261,33 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
            }
        }
        break;
      default:
        if (FL_TEST(obj, FL_EXIVAR))
            return generic_ivar_delete(obj, id, undef);
      case T_OBJECT: {
        rb_shape_t * shape = rb_shape_get_shape(obj);
        if (rb_shape_get_iv_index(shape, id, &index)) {
            rb_shape_transition_shape_remove_ivar(obj, id, shape);
            val = ROBJECT_IVPTR(obj)[index];
            ROBJECT_IVPTR(obj)[index] = Qundef;
            return val;
        }

        break;
      }
      default: {
        rb_shape_t * shape = rb_shape_get_shape(obj);

        if (rb_shape_get_iv_index(shape, id, &index)) {
            rb_shape_transition_shape_remove_ivar(obj, id, shape);
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
            val = ivtbl->ivptr[index];
            ivtbl->ivptr[index] = Qundef;
            return val;
        }

        break;
      }
    }

    return undef;
}

@ -1359,67 +1297,31 @@ rb_attr_delete(VALUE obj, ID id)
    return rb_ivar_delete(obj, id, Qnil);
}

static st_table *
iv_index_tbl_make(VALUE obj, VALUE klass)
{
    st_table *iv_index_tbl;

    if (UNLIKELY(!klass)) {
        rb_raise(rb_eTypeError, "hidden object cannot have instance variables");
    }

    if ((iv_index_tbl = RCLASS_IV_INDEX_TBL(klass)) == NULL) {
        RB_VM_LOCK_ENTER();
        if ((iv_index_tbl = RCLASS_IV_INDEX_TBL(klass)) == NULL) {
            iv_index_tbl = RCLASS_IV_INDEX_TBL(klass) = st_init_numtable();
        }
        RB_VM_LOCK_LEAVE();
    }

    return iv_index_tbl;
}

static void
iv_index_tbl_extend(struct ivar_update *ivup, ID id, VALUE klass)
{
    ASSERT_vm_locking();
    st_data_t ent_data;
    struct rb_iv_index_tbl_entry *ent;

    if (st_lookup(ivup->u.iv_index_tbl, (st_data_t)id, &ent_data)) {
        ent = (void *)ent_data;
        ivup->index = ent->index;
        return;
    }
    if (ivup->u.iv_index_tbl->num_entries >= INT_MAX) {
        rb_raise(rb_eArgError, "too many instance variables");
    }
    ent = ALLOC(struct rb_iv_index_tbl_entry);
    ent->index = ivup->index = (uint32_t)ivup->u.iv_index_tbl->num_entries;
    ent->class_value = klass;
    ent->class_serial = RCLASS_SERIAL(klass);
    st_add_direct(ivup->u.iv_index_tbl, (st_data_t)id, (st_data_t)ent);
    ivup->iv_extended = 1;
}

static void
generic_ivar_set(VALUE obj, ID id, VALUE val)
{
    VALUE klass = rb_obj_class(obj);
    struct ivar_update ivup;
    ivup.iv_extended = 0;
    ivup.u.iv_index_tbl = iv_index_tbl_make(obj, klass);
    // The returned shape will have `id` in its iv_table
    rb_shape_t * shape = rb_shape_get_next(rb_shape_get_shape(obj), obj, id);
    ivup.shape = shape;

    RB_VM_LOCK_ENTER();
    {
        iv_index_tbl_extend(&ivup, id, klass);
        st_update(generic_ivtbl(obj, id, false), (st_data_t)obj, generic_ivar_update,
                  (st_data_t)&ivup);
        attr_index_t ent_data;
        if (rb_shape_get_iv_index(shape, id, &ent_data)) {
            ivup.iv_index = (uint32_t) ent_data;
        }
        else {
            rb_bug("unreachable. Shape was not found for id: %s", rb_id2name(id));
        }

        st_update(generic_ivtbl(obj, id, false), (st_data_t)obj, generic_ivar_update, (st_data_t)&ivup);
    }
    RB_VM_LOCK_LEAVE();

    ivup.u.ivtbl->ivptr[ivup.index] = val;
    ivup.ivtbl->ivptr[ivup.iv_index] = val;

    rb_shape_set_shape(obj, shape);
    RB_OBJ_WRITTEN(obj, Qundef, val);
}

@ -1486,8 +1388,8 @@ rb_obj_transient_heap_evacuate(VALUE obj, int promote)
}
#endif

static void
init_iv_list(VALUE obj, uint32_t len, uint32_t newsize, st_table *index_tbl)
void
rb_ensure_iv_list_size(VALUE obj, uint32_t len, uint32_t newsize)
{
    VALUE *ptr = ROBJECT_IVPTR(obj);
    VALUE *newptr;

@ -1510,35 +1412,34 @@ init_iv_list(VALUE obj, uint32_t len, uint32_t newsize, st_table *index_tbl)
#else
    ROBJECT(obj)->as.heap.numiv = newsize;
#endif
    ROBJECT(obj)->as.heap.iv_index_tbl = index_tbl;
}

struct gen_ivtbl *
rb_ensure_generic_iv_list_size(VALUE obj, uint32_t newsize)
{
    struct gen_ivtbl * ivtbl = 0;

    RB_VM_LOCK_ENTER();
    {
        if (UNLIKELY(!gen_ivtbl_get_unlocked(obj, 0, &ivtbl) || newsize > ivtbl->numiv)) {
            ivtbl = gen_ivtbl_resize(ivtbl, newsize);
            st_insert(generic_ivtbl_no_ractor_check(obj), (st_data_t)obj, (st_data_t)ivtbl);
            FL_SET_RAW(obj, FL_EXIVAR);
        }
    }
    RB_VM_LOCK_LEAVE();

    RUBY_ASSERT(ivtbl);

    return ivtbl;
}

void
rb_init_iv_list(VALUE obj)
{
    st_table *index_tbl = ROBJECT_IV_INDEX_TBL(obj);
    uint32_t newsize = (uint32_t)index_tbl->num_entries;
    uint32_t newsize = rb_shape_get_shape(obj)->iv_count * 2.0;
    uint32_t len = ROBJECT_NUMIV(obj);
    init_iv_list(obj, len, newsize, index_tbl);
}

// Retrieve or create the id-to-index mapping for a given object and an
// instance variable name.
static struct ivar_update
obj_ensure_iv_index_mapping(VALUE obj, ID id)
{
    VALUE klass = rb_obj_class(obj);
    struct ivar_update ivup;
    ivup.iv_extended = 0;
    ivup.u.iv_index_tbl = iv_index_tbl_make(obj, klass);

    RB_VM_LOCK_ENTER();
    {
        iv_index_tbl_extend(&ivup, id, klass);
    }
    RB_VM_LOCK_LEAVE();

    return ivup;
    rb_ensure_iv_list_size(obj, len, newsize < len ? len : newsize);
}

// Return the instance variable index for a given name and T_OBJECT object. The

@ -1552,28 +1453,110 @@ uint32_t
rb_obj_ensure_iv_index_mapping(VALUE obj, ID id)
{
    RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));
    // This uint32_t cast shouldn't lose information as it's checked in
    // iv_index_tbl_extend(). The index is stored as an uint32_t in
    // struct rb_iv_index_tbl_entry.
    return (uint32_t)obj_ensure_iv_index_mapping(obj, id).index;
    attr_index_t index;

    // Ensure there is a transition for IVAR +id+
    rb_shape_transition_shape(obj, id, rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj)));

    // Get the current shape
    rb_shape_t * shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));

    if (!rb_shape_get_iv_index(shape, id, &index)) {
        rb_bug("unreachable. Shape was not found for id: %s", rb_id2name(id));
    }

    uint32_t len = ROBJECT_NUMIV(obj);
    if (len <= index) {
        uint32_t newsize = (shape->iv_count + 1) * 1.25;
        rb_ensure_iv_list_size(obj, len, newsize);
    }
    RUBY_ASSERT(index <= ROBJECT_NUMIV(obj));
    return index;
}

static VALUE
obj_ivar_set(VALUE obj, ID id, VALUE val)
{
    uint32_t len;
    struct ivar_update ivup = obj_ensure_iv_index_mapping(obj, id);

    len = ROBJECT_NUMIV(obj);
    if (len <= ivup.index) {
        uint32_t newsize = iv_index_tbl_newsize(&ivup);
        init_iv_list(obj, len, newsize, ivup.u.iv_index_tbl);
    }
    RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[ivup.index], val);

    attr_index_t index = rb_obj_ensure_iv_index_mapping(obj, id);
    RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[index], val);
    return val;
}

/* Set the instance variable +val+ on object +obj+ at ivar name +id+.
 * This function only works with T_OBJECT objects, so make sure
 * +obj+ is of type T_OBJECT before using this function.
 */
VALUE
rb_vm_set_ivar_id(VALUE obj, ID id, VALUE val)
{
    rb_check_frozen_internal(obj);
    obj_ivar_set(obj, id, val);
    return val;
}

bool
rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id)
{
    if (rb_shape_get_shape_id(obj) == shape_id) {
        return false;
    }

#if SHAPE_IN_BASIC_FLAGS
    RBASIC_SET_SHAPE_ID(obj, shape_id);
#else
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ROBJECT_SET_SHAPE_ID(obj, shape_id);
        break;
      case T_CLASS:
      case T_MODULE:
        {
            RCLASS_EXT(obj)->shape_id = shape_id;
            break;
        }
      default:
        {
            if (shape_id != FROZEN_ROOT_SHAPE_ID) {
                struct gen_ivtbl *ivtbl = 0;
                RB_VM_LOCK_ENTER();
                {
                    st_table* global_iv_table = generic_ivtbl(obj, 0, false);

                    if (st_lookup(global_iv_table, obj, (st_data_t *)&ivtbl)) {
                        ivtbl->shape_id = shape_id;
                    }
                    else {
                        rb_bug("Expected shape_id entry in global iv table");
                    }
                }
                RB_VM_LOCK_LEAVE();
            }
        }
    }
#endif

    return true;
}

/**
 * Prevents further modifications to the given object. ::rb_eFrozenError shall
 * be raised if modification is attempted.
 *
 * @param[out] x Object in question.
 */
void rb_obj_freeze_inline(VALUE x)
{
    if (RB_FL_ABLE(x)) {
        RB_OBJ_FREEZE_RAW(x);

        rb_shape_transition_shape_frozen(x);

        if (RBASIC_CLASS(x) && !(RBASIC(x)->flags & RUBY_FL_SINGLETON)) {
            rb_freeze_singleton_class(x);
        }
    }
}

static void
ivar_set(VALUE obj, ID id, VALUE val)
{

@ -1581,10 +1564,14 @@ ivar_set(VALUE obj, ID id, VALUE val)

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            obj_ivar_set(obj, id, val);
            break;
        }
      case T_CLASS:
      case T_MODULE:
        // TODO: Transition shapes on classes
        //rb_shape_transition_shape(obj, id, rb_shape_get_shape_by_id(RCLASS_SHAPE_ID(obj)));
        IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
        rb_class_ivar_set(obj, id, val);
        break;

@ -1614,161 +1601,86 @@ rb_ivar_set_internal(VALUE obj, ID id, VALUE val)
VALUE
rb_ivar_defined(VALUE obj, ID id)
{
    VALUE val;
    struct st_table *iv_index_tbl;
    uint32_t index;
    attr_index_t index;

    if (SPECIAL_CONST_P(obj)) return Qfalse;
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
        if (iv_index_tbl_lookup(iv_index_tbl, id, &index) &&
            index < ROBJECT_NUMIV(obj) &&
            (val = ROBJECT_IVPTR(obj)[index]) != Qundef) {
            return Qtrue;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_IV_TBL(obj) && lock_st_is_member(RCLASS_IV_TBL(obj), (st_data_t)id))
        if (RCLASS_IV_TBL(obj) && lock_st_is_member(RCLASS_IV_TBL(obj), (st_data_t)id)) {
            return Qtrue;
        break;
      default:
        if (FL_TEST(obj, FL_EXIVAR))
            return generic_ivar_defined(obj, id);
        break;
    }
        }
        else {
            return Qfalse;
        }
      default:
        return RBOOL(rb_shape_get_iv_index(rb_shape_get_shape(obj), id, &index));
    }
}

typedef int rb_ivar_foreach_callback_func(ID key, VALUE val, st_data_t arg);
st_data_t rb_st_nth_key(st_table *tab, st_index_t index);

static ID
iv_index_tbl_nth_id(st_table *iv_index_tbl, uint32_t index)
{
    st_data_t key;
    RB_VM_LOCK_ENTER();
    {
        key = rb_st_nth_key(iv_index_tbl, index);
    }
    RB_VM_LOCK_LEAVE();
    return (ID)key;
}

static inline bool
ivar_each_i(st_table *iv_index_tbl, VALUE val, uint32_t i, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
static void
iterate_over_shapes_with_callback(rb_shape_t *shape, VALUE* iv_list, rb_ivar_foreach_callback_func *callback, st_data_t arg) {
    switch ((enum shape_type)shape->type) {
      case SHAPE_ROOT:
        return;
      case SHAPE_IVAR:
        iterate_over_shapes_with_callback(shape->parent, iv_list, callback, arg);
        VALUE val = iv_list[shape->iv_count - 1];
        if (val != Qundef) {
            ID id = iv_index_tbl_nth_id(iv_index_tbl, i);
            switch (func(id, val, arg)) {
              case ST_CHECK:
              case ST_CONTINUE:
                break;
              case ST_STOP:
                return true;
              default:
                rb_bug("unreachable");
            callback(shape->edge_name, val, arg);
        }
        return;
      case SHAPE_IVAR_UNDEF:
      case SHAPE_FROZEN:
        iterate_over_shapes_with_callback(shape->parent, iv_list, callback, arg);
        return;
    }
    return false;
}
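One detail worth spelling out (an illustrative aside, not part of the patch): iterate_over_shapes_with_callback recurses to the root before visiting each node, so ivars are yielded in insertion order, and each SHAPE_IVAR node owns slot iv_count - 1 of the ivar array. A stripped-down version of the same walk, under the struct rb_shape layout from shape.h:

    // Sketch only; names are illustrative.
    static void
    example_walk(rb_shape_t *shape, VALUE *iv_list)
    {
        if (shape->type == SHAPE_ROOT) return;
        example_walk(shape->parent, iv_list);          // ancestors first
        if (shape->type == SHAPE_IVAR) {
            VALUE val = iv_list[shape->iv_count - 1];  // this node's slot
            (void)val;                                 // a real callback runs here
        }
    }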
static void
obj_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
    st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
    if (!iv_index_tbl) return;
    uint32_t i=0;

    for (i=0; i < ROBJECT_NUMIV(obj); i++) {
        VALUE val = ROBJECT_IVPTR(obj)[i];
        if (ivar_each_i(iv_index_tbl, val, i, func, arg)) {
            return;
        }
    }
    rb_shape_t* shape = rb_shape_get_shape(obj);
    iterate_over_shapes_with_callback(shape, ROBJECT_IVPTR(obj), func, arg);
}

static void
gen_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
    rb_shape_t *shape = rb_shape_get_shape(obj);
    struct gen_ivtbl *ivtbl;
    st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
    if (!iv_index_tbl) return;
    if (!gen_ivtbl_get(obj, 0, &ivtbl)) return;
    if (!rb_gen_ivtbl_get(obj, 0, &ivtbl)) return;

    for (uint32_t i=0; i<ivtbl->numiv; i++) {
        VALUE val = ivtbl->ivptr[i];
        if (ivar_each_i(iv_index_tbl, val, i, func, arg)) {
            return;
        }
    }
}

struct givar_copy {
    VALUE obj;
    VALUE klass;
    st_table *iv_index_tbl;
    struct gen_ivtbl *ivtbl;
};

static int
gen_ivar_copy(ID id, VALUE val, st_data_t arg)
{
    struct givar_copy *c = (struct givar_copy *)arg;
    struct ivar_update ivup;

    ivup.iv_extended = 0;
    ivup.u.iv_index_tbl = c->iv_index_tbl;

    RB_VM_LOCK_ENTER();
    {
        iv_index_tbl_extend(&ivup, id, c->klass);
    }
    RB_VM_LOCK_LEAVE();

    if (ivup.index >= c->ivtbl->numiv) {
        uint32_t newsize = iv_index_tbl_newsize(&ivup);
        c->ivtbl = gen_ivtbl_resize(c->ivtbl, newsize);
    }
    c->ivtbl->ivptr[ivup.index] = val;

    RB_OBJ_WRITTEN(c->obj, Qundef, val);

    return ST_CONTINUE;
    iterate_over_shapes_with_callback(shape, ivtbl->ivptr, func, arg);
}

void
rb_copy_generic_ivar(VALUE clone, VALUE obj)
{
    struct gen_ivtbl *ivtbl;
    struct gen_ivtbl *obj_ivtbl;
    struct gen_ivtbl *new_ivtbl;

    rb_check_frozen(clone);

    if (!FL_TEST(obj, FL_EXIVAR)) {
        goto clear;
    }
    if (gen_ivtbl_get(obj, 0, &ivtbl)) {
        struct givar_copy c;
        uint32_t i;

        if (gen_ivtbl_count(ivtbl) == 0)
    if (rb_gen_ivtbl_get(obj, 0, &obj_ivtbl)) {
        if (gen_ivtbl_count(obj_ivtbl) == 0)
            goto clear;

        if (gen_ivtbl_get(clone, 0, &c.ivtbl)) {
            for (i = 0; i < c.ivtbl->numiv; i++)
                c.ivtbl->ivptr[i] = Qundef;
        }
        else {
            c.ivtbl = gen_ivtbl_resize(0, ivtbl->numiv);
        new_ivtbl = gen_ivtbl_resize(0, obj_ivtbl->numiv);
        FL_SET(clone, FL_EXIVAR);

        for (uint32_t i=0; i<obj_ivtbl->numiv; i++) {
            new_ivtbl->ivptr[i] = obj_ivtbl->ivptr[i];
            RB_OBJ_WRITTEN(clone, Qundef, &new_ivtbl[i]);
        }

        VALUE klass = rb_obj_class(clone);
        c.iv_index_tbl = iv_index_tbl_make(clone, klass);
        c.obj = clone;
        c.klass = klass;
        gen_ivar_each(obj, gen_ivar_copy, (st_data_t)&c);
        /*
         * c.ivtbl may change in gen_ivar_copy due to realloc,
         * no need to free

@ -1776,9 +1688,17 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)
        RB_VM_LOCK_ENTER();
        {
            generic_ivtbl_no_ractor_check(clone);
            st_insert(generic_ivtbl_no_ractor_check(obj), (st_data_t)clone, (st_data_t)c.ivtbl);
            st_insert(generic_ivtbl_no_ractor_check(obj), (st_data_t)clone, (st_data_t)new_ivtbl);
        }
        RB_VM_LOCK_LEAVE();

        rb_shape_t * obj_shape = rb_shape_get_shape(obj);
        if (rb_shape_frozen_shape_p(obj_shape)) {
            rb_shape_set_shape(clone, obj_shape->parent);
        }
        else {
            rb_shape_set_shape(clone, obj_shape);
        }
    }
    return;

@ -1846,7 +1766,7 @@ rb_ivar_count(VALUE obj)

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (ROBJECT_IV_INDEX_TBL(obj) != 0) {
        if (rb_shape_get_shape(obj)->iv_count > 0) {
            st_index_t i, count, num = ROBJECT_NUMIV(obj);
            const VALUE *const ivptr = ROBJECT_IVPTR(obj);
            for (i = count = 0; i < num; ++i) {

@ -1867,7 +1787,7 @@ rb_ivar_count(VALUE obj)
    if (FL_TEST(obj, FL_EXIVAR)) {
        struct gen_ivtbl *ivtbl;

        if (gen_ivtbl_get(obj, 0, &ivtbl)) {
        if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
            return gen_ivtbl_count(ivtbl);
        }
    }

@ -1965,41 +1885,54 @@ rb_obj_remove_instance_variable(VALUE obj, VALUE name)
{
    VALUE val = Qnil;
    const ID id = id_for_var(obj, name, an, instance);
    st_data_t n, v;
    struct st_table *iv_index_tbl;
    uint32_t index;

    // Frozen check comes here because it's expected that we raise a
    // NameError (from the id_for_var check) before we raise a FrozenError
    rb_check_frozen(obj);

    attr_index_t index;

    if (!id) {
        goto not_defined;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
        if (iv_index_tbl_lookup(iv_index_tbl, id, &index) &&
            index < ROBJECT_NUMIV(obj) &&
            (val = ROBJECT_IVPTR(obj)[index]) != Qundef) {
            ROBJECT_IVPTR(obj)[index] = Qundef;
            return val;
        }
        break;
      case T_CLASS:
      case T_MODULE:
        IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
        n = id;
        if (RCLASS_IV_TBL(obj) && lock_st_delete(RCLASS_IV_TBL(obj), &n, &v)) {
            return (VALUE)v;
        if (RCLASS_IV_TBL(obj)) {
            st_data_t id_data = (st_data_t)id, val;
            if (lock_st_delete(RCLASS_IV_TBL(obj), &id_data, &val)) {
                return (VALUE)val;
            }
        }
        break;
      default:
        if (FL_TEST(obj, FL_EXIVAR)) {
            if (generic_ivar_remove(obj, id, &val)) {
      case T_OBJECT: {
        rb_shape_t * shape = rb_shape_get_shape(obj);
        if (rb_shape_get_iv_index(shape, id, &index)) {
            rb_shape_transition_shape_remove_ivar(obj, id, shape);
            val = ROBJECT_IVPTR(obj)[index];
            ROBJECT_IVPTR(obj)[index] = Qundef;
            return val;
        }
        }

        break;
      }
      default: {
        rb_shape_t * shape = rb_shape_get_shape(obj);

        if (rb_shape_get_iv_index(shape, id, &index)) {
            rb_shape_transition_shape_remove_ivar(obj, id, shape);
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
            val = ivtbl->ivptr[index];
            ivtbl->ivptr[index] = Qundef;
            return val;
        }

        break;
      }
    }

  not_defined:
    rb_name_err_raise("instance variable %1$s not defined",
10
variable.h
@ -11,11 +11,19 @@
/* per-object */

struct gen_ivtbl {
#if !SHAPE_IN_BASIC_FLAGS
    uint16_t shape_id;
#endif
    uint32_t numiv;
    VALUE ivptr[FLEX_ARY_LEN];
};

int rb_ivar_generic_ivtbl_lookup(VALUE obj, struct gen_ivtbl **);
VALUE rb_ivar_generic_lookup_with_index(VALUE obj, ID id, uint32_t index);

#include "shape.h"
#if !SHAPE_IN_BASIC_FLAGS
shape_id_t rb_generic_shape_id(VALUE obj);
#endif

#endif /* RUBY_TOPLEVEL_VARIABLE_H */
31
vm.c
@ -26,6 +26,7 @@
#include "internal/thread.h"
#include "internal/vm.h"
#include "internal/sanitizers.h"
#include "internal/variable.h"
#include "iseq.h"
#include "mjit.h"
#include "yjit.h"

@ -4021,6 +4022,11 @@ Init_BareVM(void)
    rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
}

#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#endif

void
Init_vm_objects(void)
{

@ -4032,6 +4038,31 @@ Init_vm_objects(void)
    vm->mark_object_ary = rb_ary_hidden_new(128);
    vm->loading_table = st_init_strtable();
    vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);

#if HAVE_MMAP
    vm->shape_list = (rb_shape_t *)mmap(NULL, rb_size_mul_or_raise(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t), rb_eRuntimeError),
                                        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (vm->shape_list == MAP_FAILED) {
        vm->shape_list = 0;
    }
#else
    vm->shape_list = xcalloc(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t));
#endif

    if (!vm->shape_list) {
        rb_memerror();
    }

    // Root shape
    vm->root_shape = rb_shape_alloc(0, 0);
    RUBY_ASSERT(rb_shape_id(vm->root_shape) == ROOT_SHAPE_ID);

    // Frozen root shape
    vm->frozen_root_shape = rb_shape_alloc(rb_make_internal_id(), vm->root_shape);
    vm->frozen_root_shape->type = (uint8_t)SHAPE_FROZEN;
    RUBY_ASSERT(rb_shape_id(vm->frozen_root_shape) == FROZEN_ROOT_SHAPE_ID);

    vm->next_shape_id = 2;
}

/* Stub for builtin function when not building YJIT units*/
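A short aside on the bookkeeping above (an illustrative sketch, not code from this commit): the asserts show that ids 0 and 1 are handed out here to the root and frozen-root shapes, matching ROOT_SHAPE_ID and FROZEN_ROOT_SHAPE_ID from shape.h, so the counter for fresh shape ids starts at 2.

    // Sketch only: taking the next id from the counter that
    // Init_vm_objects seeds with 2 (0 and 1 are already reserved).
    static shape_id_t
    example_take_shape_id(rb_vm_t *vm)
    {
        RUBY_ASSERT(vm->next_shape_id <= MAX_SHAPE_ID);
        return vm->next_shape_id++;
    }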
108
vm_callinfo.h
@ -10,6 +10,7 @@

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit, /* m(*args) */

@ -284,14 +285,32 @@ struct rb_callcache {
    const vm_call_handler call_;

    union {
        const unsigned int attr_index;
        struct {
            const attr_index_t index;
            shape_id_t dest_shape_id;
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
    } aux_;
};

#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
#define VM_CALLCACHE_ON_STACK IMEMO_FL_USER1
#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK FL_EXIVAR

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, shape_id);
    *(attr_index_t *)&cc->aux_.attr.index = 0;
    *(shape_id_t *)&cc->aux_.attr.dest_shape_id = shape_id;
}

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,

@ -299,6 +318,7 @@ vm_cc_new(VALUE klass,
          vm_call_handler call)
{
    const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
    vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

@ -350,30 +370,71 @@ vm_cc_call(const struct rb_callcache *cc)
    return cc->call_;
}

static inline unsigned int
static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.attr_index - 1;
    return cc->aux_.attr.index - 1;
}

static inline bool
vm_cc_attr_index_p(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.attr_index > 0;
    return cc->aux_.attr.index != 0;
}

static inline uint32_t
vm_ic_entry_index(const struct iseq_inline_iv_cache_entry *ic)
static inline shape_id_t
vm_cc_attr_index_source_shape_id(const struct rb_callcache *cc)
{
    return ic->entry->index;
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return IMEMO_CACHED_SHAPE_ID((VALUE)cc);
}

static inline shape_id_t
vm_cc_attr_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return vm_cc_attr_index_source_shape_id(cc);
}

static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return cc->aux_.attr.dest_shape_id;
}

static inline attr_index_t
vm_ic_attr_index(const struct iseq_inline_iv_cache_entry *ic)
{
    return ic->attr_index - 1;
}

static inline bool
vm_ic_entry_p(const struct iseq_inline_iv_cache_entry *ic)
vm_ic_attr_index_p(const struct iseq_inline_iv_cache_entry *ic)
{
    return ic->entry;
    return ic->attr_index > 0;
}

static inline shape_id_t
vm_ic_attr_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return ic->source_shape_id;
}

static inline shape_id_t
vm_ic_attr_index_source_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return ic->source_shape_id;
}

static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return ic->dest_shape_id;
}

static inline unsigned int

@ -407,10 +468,6 @@ vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *c
    }
}

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
#define vm_cc_empty() rb_vm_empty_cc()

/* callcache: mutate */

static inline void

@ -422,26 +479,29 @@ vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(int *)&cc->aux_.attr_index = index + 1;
    IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, source_shape_id);
    *(attr_index_t *)&cc->aux_.attr.index = (index + 1);
    *(shape_id_t *)&cc->aux_.attr.dest_shape_id = dest_shape_id;
}

static inline void
vm_ic_entry_set(struct iseq_inline_iv_cache_entry *ic, struct rb_iv_index_tbl_entry *entry, const rb_iseq_t *iseq)
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
{
    ic->entry = entry;
    RB_OBJ_WRITTEN(iseq, Qundef, entry->class_value);
    *(shape_id_t *)&ic->source_shape_id = source_shape_id;
    *(shape_id_t *)&ic->dest_shape_id = dest_shape_id;
    *(attr_index_t *)&ic->attr_index = index + 1;
}

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc)
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(int *)&cc->aux_.attr_index = 0;
    *(shape_id_t *)&ic->source_shape_id = shape_id;
    *(shape_id_t *)&ic->dest_shape_id = shape_id;
    *(attr_index_t *)&ic->attr_index = 0;
}

static inline void
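Two conventions in the accessors above deserve a note (an aside from the editor, not text from the commit): the stored attr_index is biased by +1 so that a zero-initialized cache reads as "nothing cached", and a plain read records source_shape_id == dest_shape_id while a write that adds an ivar records the source-to-destination transition (see populate_cache further down). A minimal sketch of the bias:

    // Sketch only: how vm_ic_attr_index()/vm_ic_attr_index_p() above
    // cooperate. A stored value of 0 means unset; otherwise the real
    // index is the stored value minus one.
    static inline bool
    example_cached_index(attr_index_t stored, attr_index_t *out)
    {
        if (stored == 0) return false;         /* cache empty */
        *out = (attr_index_t)(stored - 1);     /* undo the +1 bias */
        return true;
    }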
11
vm_core.h
@ -99,6 +99,7 @@ extern int ruby_assert_critical_section_entered;
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"
#include "shape.h"

#include "ruby/thread_native.h"

@ -272,7 +273,9 @@ struct iseq_inline_constant_cache {
};

struct iseq_inline_iv_cache_entry {
    struct rb_iv_index_tbl_entry *entry;
    shape_id_t source_shape_id;
    shape_id_t dest_shape_id;
    attr_index_t attr_index;
};

struct iseq_inline_cvar_cache_entry {

@ -687,6 +690,12 @@ typedef struct rb_vm_struct {
    VALUE mark_object_ary;
    const VALUE special_exceptions[ruby_special_error_count];

    /* object shapes */
    rb_shape_t *shape_list;
    rb_shape_t *root_shape;
    rb_shape_t *frozen_root_shape;
    shape_id_t next_shape_id;

    /* load */
    VALUE top_self;
    VALUE load_path;

@ -48,7 +48,7 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
{
    struct rb_calling_info calling = {
        .ci = &VM_CI_ON_STACK(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL),
        .cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, cme),
        .cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme),
        .block_handler = vm_passed_block_handler(ec),
        .recv = recv,
        .argc = argc,

@ -90,7 +90,7 @@ vm_call0_cc(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
static VALUE
vm_call0_cme(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv, const rb_callable_method_entry_t *cme)
{
    calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, cme);
    calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme);
    return vm_call0_body(ec, calling, argv);
}
465
vm_insnhelper.c
@ -50,6 +50,11 @@ MJIT_STATIC VALUE
|
|||
ruby_vm_special_exception_copy(VALUE exc)
|
||||
{
|
||||
VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
|
||||
rb_shape_t * shape = rb_shape_get_shape(exc);
|
||||
if (rb_shape_frozen_shape_p(shape)) {
|
||||
shape = shape->parent;
|
||||
}
|
||||
rb_shape_set_shape(e, shape);
|
||||
rb_obj_copy_ivar(e, exc);
|
||||
return e;
|
||||
}
|
||||
|
@ -1086,35 +1091,17 @@ vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_l
|
|||
return klass;
|
||||
}
|
||||
|
||||
static bool
|
||||
iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
|
||||
{
|
||||
int found;
|
||||
st_data_t ent_data;
|
||||
|
||||
if (iv_index_tbl == NULL) return false;
|
||||
|
||||
RB_VM_LOCK_ENTER();
|
||||
{
|
||||
found = st_lookup(iv_index_tbl, (st_data_t)id, &ent_data);
|
||||
}
|
||||
RB_VM_LOCK_LEAVE();
|
||||
if (found) *ent = (struct rb_iv_index_tbl_entry *)ent_data;
|
||||
|
||||
return found ? true : false;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
|
||||
|
||||
ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
|
||||
static inline void
|
||||
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
|
||||
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
|
||||
{
|
||||
// fill cache
|
||||
if (!is_attr) {
|
||||
vm_ic_entry_set(ic, ent, iseq);
|
||||
if (is_attr) {
|
||||
if (vm_cc_markable(cc)) {
|
||||
vm_cc_attr_index_set(cc, index, shape_id, shape_id);
|
||||
}
|
||||
}
|
||||
else {
|
||||
vm_cc_attr_index_set(cc, ent->index);
|
||||
vm_ic_attr_index_set(iseq, ic, index, shape_id, shape_id);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1129,67 +1116,119 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
|
|||
{
|
||||
#if OPT_IC_FOR_IVAR
|
||||
VALUE val = Qundef;
|
||||
shape_id_t shape_id;
|
||||
VALUE * ivar_list;
|
||||
|
    if (SPECIAL_CONST_P(obj)) {
        // frozen?
    }
    else if (LIKELY(is_attr ?
                    RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index_p(cc)) :
                    RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
        uint32_t index = !is_attr ? vm_ic_entry_index(ic): (vm_cc_attr_index(cc));

        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
            LIKELY(index < ROBJECT_NUMIV(obj))) {
            val = ROBJECT_IVPTR(obj)[index];

            VM_ASSERT(ractor_object_incidental_shareable_p(obj, val));
        }
        else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            val = rb_ivar_generic_lookup_with_index(obj, id, index);
        return Qnil;
        }

        goto ret;
    }
    else {
        struct rb_iv_index_tbl_entry *ent;
#if SHAPE_IN_BASIC_FLAGS
    shape_id = RBASIC_SHAPE_ID(obj);
#endif

        if (BUILTIN_TYPE(obj) == T_OBJECT) {
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        ivar_list = ROBJECT_IVPTR(obj);
        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);

            if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
                fill_ivar_cache(iseq, ic, cc, is_attr, ent);

                // get value
                if (ent->index < ROBJECT_NUMIV(obj)) {
                    val = ROBJECT_IVPTR(obj)[ent->index];

                    VM_ASSERT(ractor_object_incidental_shareable_p(obj, val));
                }
            }
        }
        else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));

            if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
                fill_ivar_cache(iseq, ic, cc, is_attr, ent);
                val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
            }
        }
        else {
            // T_CLASS / T_MODULE
#if !SHAPE_IN_BASIC_FLAGS
        shape_id = ROBJECT_SHAPE_ID(obj);
#endif
        break;
      case T_CLASS:
      case T_MODULE:
        {
            goto general_path;
        }
      default:
        if (FL_TEST_RAW(obj, FL_EXIVAR)) {
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
#if !SHAPE_IN_BASIC_FLAGS
            shape_id = ivtbl->shape_id;
#endif
            ivar_list = ivtbl->ivptr;
        } else {
            return Qnil;
        }
    }

  ret:
    if (LIKELY(val != Qundef)) {
        return val;
    shape_id_t cached_id;

    if (is_attr) {
        cached_id = vm_cc_attr_shape_id(cc);
    }
    else {
        cached_id = vm_ic_attr_shape_id(ic);
    }

    attr_index_t index;

    if (LIKELY(cached_id == shape_id)) {
        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

        if (is_attr && vm_cc_attr_index_p(cc)) {
            index = vm_cc_attr_index(cc);
        }
        else if (!is_attr && vm_ic_attr_index_p(ic)) {
            index = vm_ic_attr_index(ic);
        }
        else {
            return Qnil;
        }

        val = ivar_list[index];
        VM_ASSERT(BUILTIN_TYPE(obj) == T_OBJECT && rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
    }
    else { // cache miss case
#if RUBY_DEBUG
        if (is_attr) {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
            } else {
                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
            }
        }
        else {
            if (cached_id != INVALID_SHAPE_ID) {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
            } else {
                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
            }
        }
#endif

        attr_index_t index;
        rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);

        if (rb_shape_get_iv_index(shape, id, &index)) {
            // This fills in the cache with the shared cache object.
            // "ent" is the shared cache object
            fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);

            // We fetched the ivar list above
            val = ivar_list[index];
        }
        else {
            if (is_attr) {
                if (vm_cc_markable(cc)) {
                    vm_cc_attr_index_initialize(cc, shape_id);
                }
            }
            else {
                vm_ic_attr_index_initialize(ic, shape_id);
            }

            val = Qnil;
        }

    }

    RUBY_ASSERT(val != Qundef);

    return val;

general_path:
#endif /* OPT_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
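
The hunk above swaps vm_getivar's cache key: the old code validated the inline cache with the class serial (see the ivar_get_ic_miss_serial counter) and looked indexes up in per-class iv_index_tbl tables, while the shapes version validates with a single shape_id compare and, on a miss, asks the shape tree via rb_shape_get_iv_index before repopulating the cache. A minimal, self-contained model of that read path (not CRuby's code; the iv_cache_t/obj_t layouts and the shape_lookup stub are invented for illustration):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

/* One inline-cache entry: the shape seen at this call site last time,
 * plus the slot the ivar occupied under that shape. */
typedef struct { shape_id_t cached_shape_id; attr_index_t index; } iv_cache_t;

/* Toy object: its current shape id and a flat ivar slot array. */
typedef struct { shape_id_t shape_id; long ivars[8]; } obj_t;

/* Toy stand-in for rb_shape_get_iv_index(): pretend every shape stores
 * the ivar at slot 0. The real lookup walks the shape tree. */
static int shape_lookup(shape_id_t shape_id, attr_index_t *index)
{
    (void)shape_id;
    *index = 0;
    return 1;
}

static long read_ivar(obj_t *obj, iv_cache_t *ic, int *hit)
{
    if (ic->cached_shape_id == obj->shape_id) {
        *hit = 1;                              /* hit: one compare, one load */
        return obj->ivars[ic->index];
    }
    *hit = 0;
    attr_index_t index;
    if (shape_lookup(obj->shape_id, &index)) { /* miss: consult the shape tree */
        ic->cached_shape_id = obj->shape_id;   /* ... and repopulate the cache */
        ic->index = index;
        return obj->ivars[index];
    }
    return 0;                                  /* undefined ivar reads as nil */
}

int main(void)
{
    obj_t a = { .shape_id = 7, .ivars = { 42 } };
    iv_cache_t ic = { .cached_shape_id = (shape_id_t)-1, .index = 0 };
    int hit;
    long v1 = read_ivar(&a, &ic, &hit);
    printf("%ld (hit=%d)\n", v1, hit); /* 42 (hit=0) */
    long v2 = read_ivar(&a, &ic, &hit);
    printf("%ld (hit=%d)\n", v2, hit); /* 42 (hit=1) */
    return 0;
}

The payoff is that the hot path no longer depends on the receiver's class at all: one integer compare plus one indexed load, for T_OBJECT and generic-ivar objects alike.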
@@ -1202,40 +1241,91 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
    }
}

static void
populate_cache(attr_index_t index, shape_id_t shape_id, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
    // Cache population code
    if (is_attr) {
        if (vm_cc_markable(cc)) {
            vm_cc_attr_index_set(cc, index, shape_id, next_shape_id);
        }
    }
    else {
        vm_ic_attr_index_set(iseq, ic, index, shape_id, next_shape_id);
    }
}

ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));

static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            rb_check_frozen_internal(obj);

#if OPT_IC_FOR_IVAR
    if (RB_TYPE_P(obj, T_OBJECT)) {
        struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
        struct rb_iv_index_tbl_entry *ent;
        attr_index_t index;

        if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
            if (!is_attr) {
                vm_ic_entry_set(ic, ent, iseq);
            uint32_t num_iv = ROBJECT_NUMIV(obj);
            rb_shape_t* shape = rb_shape_get_shape(obj);
            shape_id_t current_shape_id = ROBJECT_SHAPE_ID(obj);
            shape_id_t next_shape_id = current_shape_id;

            rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);

            if (shape != next_shape) {
                rb_shape_set_shape(obj, next_shape);
                next_shape_id = ROBJECT_SHAPE_ID(obj);
            }
            else if (ent->index >= INT_MAX) {

            if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
                if (index >= MAX_IVARS) {
                    rb_raise(rb_eArgError, "too many instance variables");
                }

                populate_cache(index, current_shape_id, next_shape_id, id, iseq, ic, cc, is_attr);
            }
            else {
                vm_cc_attr_index_set(cc, (int)(ent->index));
                rb_bug("Didn't find instance variable %s\n", rb_id2name(id));
            }

            uint32_t index = ent->index;

            if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
            // Ensure the IV buffer is wide enough to store the IV
            if (UNLIKELY(index >= num_iv)) {
                rb_init_iv_list(obj);
            }

            VALUE *ptr = ROBJECT_IVPTR(obj);
            RB_OBJ_WRITE(obj, &ptr[index], val);
            RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);

            return val;
        }
      case T_CLASS:
      case T_MODULE:
        break;
      default:
        {
            shape_id_t shape_id = rb_shape_get_shape_id(obj);
            rb_ivar_set(obj, id, val);
            shape_id_t next_shape_id = rb_shape_get_shape_id(obj);
            rb_shape_t *next_shape = rb_shape_get_shape_by_id(next_shape_id);
            attr_index_t index;

            if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
                if (index >= MAX_IVARS) {
                    rb_raise(rb_eArgError, "too many instance variables");
                }

                populate_cache(index, shape_id, next_shape_id, id, iseq, ic, cc, is_attr);
            }
            else {
                rb_bug("didn't find the id\n");
            }

            return val;
        }
    }
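
vm_setivar_slowpath above moves the receiver to its next shape (rb_shape_get_next) when a brand-new ivar is defined, then records both the source and destination shape ids through populate_cache. The transition-tree idea behind rb_shape_get_next, sketched under simplified assumptions (fixed-size edge array and string edge names; the real tree keys edges by ID in an id table):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy shape node: parent link, outgoing edges, the ivar name that created
 * this node, and how many ivars are defined once an object reaches it. */
typedef struct shape {
    struct shape *parent;
    struct shape *edges[16];
    size_t nedges;
    const char *edge_name;
    unsigned iv_count;
} shape_t;

/* Follow the edge for `name`, creating it on first use: objects that gain
 * the same ivars in the same order end up sharing one chain of shapes. */
static shape_t *shape_get_next(shape_t *shape, const char *name)
{
    for (size_t i = 0; i < shape->nedges; i++) {
        if (strcmp(shape->edges[i]->edge_name, name) == 0) {
            return shape->edges[i];        /* existing transition */
        }
    }
    if (shape->nedges == 16) abort();      /* toy bound on the edge table */
    shape_t *child = calloc(1, sizeof(*child));
    child->parent = shape;
    child->edge_name = name;
    child->iv_count = shape->iv_count + 1; /* the new ivar takes the next slot */
    shape->edges[shape->nedges++] = child;
    return child;
}

int main(void)
{
    shape_t root = {0};
    shape_t *a = shape_get_next(&root, "@foo");
    shape_t *b = shape_get_next(&root, "@foo"); /* same order => same shape */
    printf("shared=%d slot=%u\n", a == b, a->iv_count - 1); /* shared=1 slot=0 */
    return 0;
}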
@@ -1256,39 +1346,94 @@ vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache
    return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}

NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
{
#if SHAPE_IN_BASIC_FLAGS
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
#else
    shape_id_t shape_id = rb_generic_shape_id(obj);
#endif

    // Cache hit case
    if (shape_id == source_shape_id) {
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

        struct gen_ivtbl *ivtbl = 0;
        if (dest_shape_id != shape_id) {
            ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
#if SHAPE_IN_BASIC_FLAGS
            RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
#else
            ivtbl->shape_id = dest_shape_id;
#endif
        }
        else {
            // Just get the IV table
            rb_gen_ivtbl_get(obj, 0, &ivtbl);
        }

        VALUE *ptr = ivtbl->ivptr;

        RB_OBJ_WRITE(obj, &ptr[index], val);

        RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);

        return val;
    }

    return Qundef;
}

static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
{
#if OPT_IC_FOR_IVAR
    if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
        LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        {
            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
            // If object's shape id is the same as the source
            // then do the shape transition and write the ivar
            // If object's shape id is the same as the dest
            // then write the ivar
            shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);

            // Do we have a cache hit *and* is the CC initialized
            if (shape_id == source_shape_id) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);

                VM_ASSERT(!rb_ractor_shareable_p(obj));

        if (LIKELY(
            (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
            ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index_p(cc))))) {
            uint32_t index = !is_attr ? vm_ic_entry_index(ic) : vm_cc_attr_index(cc);

                if (dest_shape_id != shape_id) {
                    if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
                        rb_init_iv_list(obj);
                    }
                    ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
                }

                RUBY_ASSERT(index < ROBJECT_NUMIV(obj));

                VALUE *ptr = ROBJECT_IVPTR(obj);

                RB_OBJ_WRITE(obj, &ptr[index], val);

                RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
            return val; /* inline cache hit */

                return val;
            }
        }
        else {
        break;
      case T_CLASS:
      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
      default:
        break;
    }

    return Qundef;
#endif /* OPT_IC_FOR_IVAR */
    if (is_attr) {
        return vm_setivar_slowpath_attr(obj, id, val, cc);
    }
    else {
        return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

static VALUE
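
The rewritten vm_setivar takes the cached triple (source_shape_id, dest_shape_id, index) instead of the IC/CC pointers. Equal source and dest means the ivar already exists and only the store happens; differing ids mean the write must also perform the cached shape transition. A compilable toy version of that contract (names invented; Qundef modeled as a 0 return meaning "take the slow path"):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

typedef struct { shape_id_t shape_id; long ivars[8]; } obj_t;

/* Write-path cache: "an object of shape `source` that sets this ivar
 * stores into slot `index` and becomes shape `dest`". */
typedef struct {
    shape_id_t source_shape_id;
    shape_id_t dest_shape_id;
    attr_index_t index;
} set_cache_t;

static int write_ivar(obj_t *obj, const set_cache_t *sc, long val)
{
    if (obj->shape_id != sc->source_shape_id) {
        return 0;                          /* miss: caller takes the slow path */
    }
    if (sc->dest_shape_id != sc->source_shape_id) {
        obj->shape_id = sc->dest_shape_id; /* new ivar: perform the transition */
    }
    obj->ivars[sc->index] = val;           /* existing or fresh slot, same store */
    return 1;
}

int main(void)
{
    obj_t o = { .shape_id = 3 };
    set_cache_t sc = { .source_shape_id = 3, .dest_shape_id = 5, .index = 0 };
    int hit = write_ivar(&o, &sc, 42);
    printf("hit=%d shape=%u\n", hit, o.shape_id); /* hit=1 shape=5 */
    printf("hit=%d\n", write_ivar(&o, &sc, 43));  /* hit=0: o is now shape 5 */
    return 0;
}

After the transition the object's shape equals dest, so the next write through the same site misses once and the slow path repopulates the cache with source == dest, after which stores hit again.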
@@ -1383,7 +1528,22 @@ vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
    vm_setivar(obj, id, val, iseq, ic, 0, 0);
    shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ic);
    attr_index_t index = vm_ic_attr_index(ic);
    shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ic);
    if (UNLIKELY(vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index) == Qundef)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            if (vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index) != Qundef) {
                return;
            }
        }
        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
    }
}

void
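
vm_setinstancevariable above shows the dispatch order used throughout the commit: try the T_OBJECT fast path, fall back to vm_setivar_default for generic-ivar objects (anything that is not T_OBJECT, T_CLASS or T_MODULE), and only then take the slow path. A compact sketch of that cascade with the tiers reduced to stubs (all names invented for illustration):

#include <stdio.h>

typedef unsigned long value_t;
#define UNDEF ((value_t)0) /* stand-in for Qundef: "not handled by this tier" */

enum rtype { T_OBJ, T_CLASS_OR_MODULE, T_OTHER };

static value_t fast_path(void)    { return UNDEF; } /* shape cache missed   */
static value_t generic_path(void) { return 1; }     /* generic ivtbl write  */
static value_t slow_path(void)    { return 2; }     /* full lookup + refill */

static value_t set_ivar(enum rtype type)
{
    value_t res = fast_path();
    if (res != UNDEF) return res;

    /* T_OBJECT, T_CLASS and T_MODULE skip the generic tier,
     * mirroring the switch in the hunk above. */
    if (type == T_OTHER) {
        res = generic_path();
        if (res != UNDEF) return res;
    }
    return slow_path();
}

int main(void)
{
    printf("%lu\n", set_ivar(T_OTHER));           /* 1: generic tier */
    printf("%lu\n", set_ivar(T_OBJ));             /* 2: slow tier    */
    printf("%lu\n", set_ivar(T_CLASS_OR_MODULE)); /* 2: slow tier    */
    return 0;
}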
@@ -1392,28 +1552,6 @@ rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IV
    vm_setinstancevariable(iseq, obj, id, val, ic);
}

/* Set the instance variable +val+ on object +obj+ at the +index+.
 * This function only works with T_OBJECT objects, so make sure
 * +obj+ is of type T_OBJECT before using this function.
 */
VALUE
rb_vm_set_ivar_idx(VALUE obj, uint32_t index, VALUE val)
{
    RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));

    rb_check_frozen_internal(obj);

    VM_ASSERT(!rb_ractor_shareable_p(obj));

    if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
        rb_init_iv_list(obj);
    }
    VALUE *ptr = ROBJECT_IVPTR(obj);
    RB_OBJ_WRITE(obj, &ptr[index], val);

    return val;
}

static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
@@ -3106,17 +3244,45 @@ vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call
    const struct rb_callcache *cc = calling->cc;
    RB_DEBUG_COUNTER_INC(ccf_ivar);
    cfp->sp -= 1;
    return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
    VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
    return ivar;
}

static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(ccf_attrset);
    VALUE val = *(cfp->sp - 1);
    cfp->sp -= 2;
    shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
    attr_index_t index = vm_cc_attr_index(cc);
    shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
    ID id = vm_cc_cme(cc)->def->body.attr.id;
    rb_check_frozen_internal(obj);
    VALUE res = vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index);
    if (res == Qundef) {
        switch (BUILTIN_TYPE(obj)) {
          case T_OBJECT:
          case T_CLASS:
          case T_MODULE:
            break;
          default:
            {
                res = vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index);
                if (res != Qundef) {
                    return res;
                }
            }
        }
        res = vm_setivar_slowpath_attr(obj, id, val, cc);
    }
    return res;
}

static VALUE
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    const struct rb_callcache *cc = calling->cc;
    RB_DEBUG_COUNTER_INC(ccf_attrset);
    VALUE val = *(cfp->sp - 1);
    cfp->sp -= 2;
    return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
    return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
}

bool
@@ -3225,7 +3391,7 @@ vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_cal
{
    calling->cc = &VM_CC_ON_STACK(Qundef,
                                  vm_call_general,
                                  { 0 },
                                  {{0}},
                                  aliased_callable_method_entry(vm_cc_cme(calling->cc)));

    return vm_call_method_each_type(ec, cfp, calling);
@@ -3395,7 +3561,7 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_

    ec->method_missing_reason = reason;
    calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                  rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
    return vm_call_method(ec, reg_cfp, calling);
}
@@ -3421,7 +3587,7 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
        cme = refined_method_callable_without_refinement(cme);
    }

    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);

    return vm_call_method_each_type(ec, cfp, calling);
}
@@ -3528,7 +3694,7 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struc
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
    struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
    struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                                  search_refined_method(ec, cfp, calling));

    if (vm_cc_cme(ref_cc)) {
@@ -3708,18 +3874,45 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
        CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);

        rb_check_arity(calling->argc, 1, 1);
        vm_cc_attr_index_initialize(cc);

        const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);

        if (vm_cc_markable(cc)) {
            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset(ec, cfp, calling),
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        } else {
            cc = &((struct rb_callcache) {
                .flags = T_IMEMO |
                    (imemo_callcache << FL_USHIFT) |
                    VM_CALLCACHE_UNMARKABLE |
                    ((VALUE)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT) |
                    VM_CALLCACHE_ON_STACK,
                .klass = cc->klass,
                .cme_  = cc->cme_,
                .call_ = cc->call_,
                .aux_  = {
                    .attr = {
                        .index = 0,
                        .dest_shape_id = INVALID_SHAPE_ID,
                    }
                },
            });

            VM_CALL_METHOD_ATTR(v,
                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
        }
        return v;

      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci);
        CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
        rb_check_arity(calling->argc, 0, 0);
        vm_cc_attr_index_initialize(cc);
        if (vm_cc_markable(cc)) {
            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
        }
        const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
        VM_CALL_METHOD_ATTR(v,
                            vm_call_ivar(ec, cfp, calling),
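
When the call cache is not GC-markable, the hunk above builds a one-off rb_callcache on the C stack with a compound literal, flagged VM_CALLCACHE_UNMARKABLE | VM_CALLCACHE_ON_STACK and carrying an invalid shape id so the attr cache starts out unset. The C idiom, reduced to a toy struct (flag values and layout invented, not the real rb_callcache):

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t flags;
    uint32_t index;
    uint32_t dest_shape_id;
} cc_t;

#define CC_UNMARKABLE 0x1u /* GC must never mark through this cache */
#define CC_ON_STACK   0x2u /* it lives in the caller's C frame      */
#define INVALID_SHAPE 0xffffffffu

static uint32_t use_attrset_cache(const cc_t *cc)
{
    return cc->dest_shape_id; /* stand-in for vm_call_attrset_direct() */
}

int main(void)
{
    cc_t heap_cc = { .flags = 0, .index = 9, .dest_shape_id = 9 };

    /* Copy the immutable parts, reset the attr cache to "unset": */
    cc_t stack_cc = {
        .flags = heap_cc.flags | CC_UNMARKABLE | CC_ON_STACK,
        .index = 0,
        .dest_shape_id = INVALID_SHAPE,
    };

    printf("%u\n", use_attrset_cache(&stack_cc)); /* 4294967295 */
    return 0;
}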
@@ -40,6 +40,7 @@ fn main() {
        .header("internal.h")
        .header("internal/re.h")
        .header("include/ruby/ruby.h")
        .header("shape.h")
        .header("vm_core.h")
        .header("vm_callinfo.h")
@@ -81,6 +82,12 @@ fn main() {
        // This function prints info about a value and is useful for debugging
        .allowlist_function("rb_obj_info_dump")

        // From shape.h
        .allowlist_function("rb_shape_get_shape_id")
        .allowlist_function("rb_shape_get_shape_by_id")
        .allowlist_function("rb_shape_flags_mask")
        .allowlist_function("rb_shape_get_iv_index")

        // From ruby/internal/intern/object.h
        .allowlist_function("rb_obj_is_kind_of")
@@ -617,7 +617,7 @@ fn write_rm_multi(cb: &mut CodeBlock, op_mem_reg8: u8, op_mem_reg_pref: u8, op_r
                write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
                cb.write_int(uimm.value, if opnd_size > 32 { 32 } else { opnd_size.into() });
            } else {
                panic!("immediate value too large");
                panic!("immediate value too large (num_bits={})", num_bits);
            }
        },
        _ => unreachable!()
@@ -1938,11 +1938,9 @@ fn gen_set_ivar(
    let val_opnd = ctx.stack_pop(1);
    let recv_opnd = ctx.stack_pop(1);

    let ivar_index: u32 = unsafe { rb_obj_ensure_iv_index_mapping(recv, ivar_name) };

    // Call rb_vm_set_ivar_idx with the receiver, the index of the ivar, and the value
    // Call rb_vm_set_ivar_id with the receiver, the ivar name, and the value
    let val = asm.ccall(
        rb_vm_set_ivar_idx as *const u8,
        rb_vm_set_ivar_id as *const u8,
        vec![
            recv_opnd,
            ivar_index.into(),
@@ -2023,38 +2021,50 @@ fn gen_get_ivar(
        return EndBlock;
    }

    // FIXME: Mapping the index could fail when there are too many ivar names. If we're
    // compiling for a branch stub that can cause the exception to be thrown from the
    // wrong PC.
    let ivar_index =
        unsafe { rb_obj_ensure_iv_index_mapping(comptime_receiver, ivar_name) }.as_usize();
    let ivar_index = unsafe {
        let shape_id = comptime_receiver.shape_of();
        let shape = rb_shape_get_shape_by_id(shape_id);
        let mut ivar_index: u32 = 0;
        if rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index) {
            Some(ivar_index as usize)
        } else {
            None
        }
    };

    // must be before stack_pop
    let recv_type = ctx.get_opnd_type(recv_opnd);

    // Upgrade type
    if !recv_type.is_heap() {
        ctx.upgrade_opnd_type(recv_opnd, Type::UnknownHeap);
    }

    // Pop receiver if it's on the temp stack
    if recv_opnd != SelfOpnd {
        ctx.stack_pop(1);
    }

    if USE_RVARGC != 0 {
        // Check that the ivar table is big enough
        // Check that the slot is inside the ivar table (num_slots > index)
        let num_slots = Opnd::mem(32, recv, ROBJECT_OFFSET_NUMIV);
        asm.cmp(num_slots, Opnd::UImm(ivar_index as u64));
        asm.jbe(counted_exit!(ocb, side_exit, getivar_idx_out_of_range).into());
    // Guard heap object
    if !recv_type.is_heap() {
        guard_object_is_heap(asm, recv, side_exit);
    }

    // Compile time self is embedded and the ivar index lands within the object
    let test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
    if test_result {
        // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
    let embed_test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };

        // Guard that self is embedded
        // TODO: BT and JC are shorter
        asm.comment("guard embedded getivar");
    let flags_mask: usize = unsafe { rb_shape_flags_mask() }.as_usize();
    let expected_flags_mask: usize = (RUBY_T_MASK as usize) | !flags_mask | (ROBJECT_EMBED as usize);
    let expected_flags = comptime_receiver.builtin_flags() & expected_flags_mask;

    // Combined guard for all flags: shape, embeddedness, and T_OBJECT
    let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
        asm.test(flags_opnd, Opnd::UImm(ROBJECT_EMBED as u64));
    let side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);

    asm.comment("guard shape, embedded, and T_OBJECT");
    let flags_opnd = asm.and(flags_opnd, Opnd::UImm(expected_flags_mask as u64));
    asm.cmp(flags_opnd, Opnd::UImm(expected_flags as u64));
    jit_chain_guard(
        JCC_JZ,
        JCC_JNE,
        jit,
        &starting_context,
        asm,
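
Because the shape id lives in the upper bits of the RBasic flags word (when SHAPE_IN_BASIC_FLAGS is set), the guard in gen_get_ivar can collapse three checks — T_OBJECT type, the embedded bit, and the shape — into one AND plus one CMP against the flags observed on the compile-time receiver. A simplified model with invented bit positions (the real masks come from RUBY_T_MASK, ROBJECT_EMBED and rb_shape_flags_mask()):

#include <stdint.h>
#include <stdio.h>

#define RUBY_T_MASK   0x1fULL           /* low bits: object type         */
#define ROBJECT_EMBED 0x2000ULL         /* one flag bit: embedded ivars  */
#define SHAPE_MASK    (0xffffULL << 48) /* toy: shape id in the top bits */

/* One AND + one CMP covers type, embeddedness, and shape at once. */
static int guard_flags(uint64_t flags, uint64_t expected_flags)
{
    uint64_t mask = RUBY_T_MASK | ROBJECT_EMBED | SHAPE_MASK;
    return (flags & mask) == expected_flags;
}

int main(void)
{
    uint64_t comptime = 0x0001000000002001ULL; /* "T_OBJECT", embedded, shape 1 */
    uint64_t expected = comptime & (RUBY_T_MASK | ROBJECT_EMBED | SHAPE_MASK);
    printf("%d\n", guard_flags(comptime, expected));              /* 1 */
    printf("%d\n", guard_flags(0x0002000000002001ULL, expected)); /* 0: other shape */
    return 0;
}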
@@ -2063,41 +2073,30 @@
        side_exit,
    );

    // Load the variable
    let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index * SIZEOF_VALUE) as i32;
    let ivar_opnd = Opnd::mem(64, recv, offs);
    // If there is no IVAR index, then the ivar was undefined
    // when we entered the compiler. That means we can just return
    // nil for this shape + iv name
    if ivar_index.is_none() {
        let out_opnd = ctx.stack_push(Type::Nil);
        asm.mov(out_opnd, Qnil.into());
    } else if embed_test_result {
        // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h

        // Guard that the variable is not Qundef
        asm.cmp(ivar_opnd, Qundef.into());
        let out_val = asm.csel_e(Qnil.into(), ivar_opnd);
        // Load the variable
        let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index.unwrap() * SIZEOF_VALUE) as i32;
        let ivar_opnd = Opnd::mem(64, recv, offs);

        // Push the ivar on the stack
        let out_opnd = ctx.stack_push(Type::Unknown);
        asm.mov(out_opnd, out_val);
        asm.mov(out_opnd, ivar_opnd);
    } else {
        // Compile time value is *not* embedded.

        // Guard that value is *not* embedded
        // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
        asm.comment("guard extended getivar");
        let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
        asm.test(flags_opnd, Opnd::UImm(ROBJECT_EMBED as u64));
        let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
        jit_chain_guard(
            JCC_JNZ,
            jit,
            &starting_context,
            asm,
            ocb,
            max_chain_depth,
            megamorphic_side_exit,
        );

        if USE_RVARGC == 0 {
            // Check that the extended table is big enough
            // Check that the slot is inside the extended table (num_slots > index)
            let num_slots = Opnd::mem(32, recv, ROBJECT_OFFSET_NUMIV);
            asm.cmp(num_slots, Opnd::UImm(ivar_index as u64));
            asm.cmp(num_slots, Opnd::UImm(ivar_index.unwrap() as u64));
            asm.jbe(counted_exit!(ocb, side_exit, getivar_idx_out_of_range).into());
        }
@@ -2105,15 +2104,10 @@
        let tbl_opnd = asm.load(Opnd::mem(64, recv, ROBJECT_OFFSET_AS_HEAP_IVPTR));

        // Read the ivar from the extended table
        let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32);
        let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index.unwrap()) as i32);

        // Check that the ivar is not Qundef
        asm.cmp(ivar_opnd, Qundef.into());
        let out_val = asm.csel_ne(ivar_opnd, Qnil.into());

        // Push the ivar on the stack
        let out_opnd = ctx.stack_push(Type::Unknown);
        asm.mov(out_opnd, out_val);
        asm.mov(out_opnd, ivar_opnd);
    }

    // Jump to next instruction. This allows guard chains to share the same successor.
@@ -2136,25 +2130,12 @@ fn gen_getinstancevariable(
    let ivar_name = jit_get_arg(jit, 0).as_u64();

    let comptime_val = jit_peek_at_self(jit);
    let comptime_val_klass = comptime_val.class_of();

    // Generate a side exit
    let side_exit = get_side_exit(jit, ocb, ctx);

    // Guard that the receiver has the same class as the one from compile time.
    let self_asm_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF);
    jit_guard_known_klass(
        jit,
        ctx,
        asm,
        ocb,
        comptime_val_klass,
        self_asm_opnd,
        SelfOpnd,
        comptime_val,
        GET_IVAR_MAX_DEPTH,
        side_exit,
    );

    gen_get_ivar(
        jit,
@@ -120,7 +120,7 @@ extern "C" {
        obj: VALUE,
        v: VALUE,
    ) -> bool;
    pub fn rb_vm_set_ivar_idx(obj: VALUE, idx: u32, val: VALUE) -> VALUE;
    pub fn rb_vm_set_ivar_id(obj: VALUE, idx: u32, val: VALUE) -> VALUE;
    pub fn rb_vm_setinstancevariable(iseq: IseqPtr, obj: VALUE, id: ID, val: VALUE, ic: IVC);
    pub fn rb_aliased_callable_method_entry(
        me: *const rb_callable_method_entry_t,
@@ -354,18 +354,26 @@ impl VALUE {

    /// Read the flags bits from the RBasic object, then return a Ruby type enum (e.g. RUBY_T_ARRAY)
    pub fn builtin_type(self) -> ruby_value_type {
        (self.builtin_flags() & (RUBY_T_MASK as usize)) as ruby_value_type
    }

    pub fn builtin_flags(self) -> usize {
        assert!(!self.special_const_p());

        let VALUE(cval) = self;
        let rbasic_ptr = cval as *const RBasic;
        let flags_bits: usize = unsafe { (*rbasic_ptr).flags }.as_usize();
        (flags_bits & (RUBY_T_MASK as usize)) as ruby_value_type
        return flags_bits;
    }

    pub fn class_of(self) -> VALUE {
        unsafe { CLASS_OF(self) }
    }

    pub fn shape_of(self) -> u32 {
        unsafe { rb_shape_get_shape_id(self) }
    }

    pub fn as_isize(self) -> isize {
        let VALUE(is) = self;
        is as isize
@@ -402,6 +402,29 @@ extern "C" {
extern "C" {
    pub fn rb_reg_new_ary(ary: VALUE, options: ::std::os::raw::c_int) -> VALUE;
}
pub type attr_index_t = u32;
pub type shape_id_t = u32;
#[repr(C)]
pub struct rb_shape {
    pub parent: *mut rb_shape,
    pub edges: *mut rb_id_table,
    pub edge_name: ID,
    pub iv_count: attr_index_t,
    pub type_: u8,
}
pub type rb_shape_t = rb_shape;
extern "C" {
    pub fn rb_shape_get_shape_by_id(shape_id: shape_id_t) -> *mut rb_shape_t;
}
extern "C" {
    pub fn rb_shape_get_shape_id(obj: VALUE) -> shape_id_t;
}
extern "C" {
    pub fn rb_shape_get_iv_index(shape: *mut rb_shape_t, id: ID, value: *mut attr_index_t) -> bool;
}
extern "C" {
    pub fn rb_shape_flags_mask() -> VALUE;
}
pub const idDot2: ruby_method_ids = 128;
pub const idDot3: ruby_method_ids = 129;
pub const idUPlus: ruby_method_ids = 132;
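
These bindings expose the shape tree node itself: a parent link, an edge table, the ID of the edge that created the node, and the running ivar count. An ivar's slot can be recovered by walking toward the root — roughly the contract of rb_shape_get_iv_index, modeled here with string edge names instead of an id table:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct shape {
    struct shape *parent;
    const char *edge_name; /* ivar that created this shape  */
    unsigned iv_count;     /* ivars defined once we're here */
} shape_t;

/* The shape that introduced `name` stores it at slot iv_count - 1. */
static int shape_get_iv_index(const shape_t *shape, const char *name, unsigned *index)
{
    for (; shape && shape->parent; shape = shape->parent) {
        if (strcmp(shape->edge_name, name) == 0) {
            *index = shape->iv_count - 1;
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    shape_t root = { NULL, NULL, 0 };
    shape_t s1 = { &root, "@foo", 1 };
    shape_t s2 = { &s1, "@bar", 2 };
    unsigned idx;
    if (shape_get_iv_index(&s2, "@foo", &idx)) printf("@foo -> %u\n", idx); /* 0 */
    if (shape_get_iv_index(&s2, "@bar", &idx)) printf("@bar -> %u\n", idx); /* 1 */
    printf("undefined: %d\n", shape_get_iv_index(&s2, "@baz", &idx));       /* 0 */
    return 0;
}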
@@ -719,6 +742,11 @@ pub const OPTIMIZED_METHOD_TYPE_STRUCT_AREF: method_optimized_type = 3;
pub const OPTIMIZED_METHOD_TYPE_STRUCT_ASET: method_optimized_type = 4;
pub const OPTIMIZED_METHOD_TYPE__MAX: method_optimized_type = 5;
pub type method_optimized_type = u32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct rb_id_table {
    _unused: [u8; 0],
}
extern "C" {
    pub fn rb_method_entry_at(obj: VALUE, id: ID) -> *const rb_method_entry_t;
}
@@ -747,9 +775,10 @@ pub struct iseq_inline_constant_cache {
    pub segments: *const ID,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct iseq_inline_iv_cache_entry {
    pub entry: *mut rb_iv_index_tbl_entry,
    pub source_shape_id: shape_id_t,
    pub dest_shape_id: shape_id_t,
    pub attr_index: attr_index_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
@@ -938,12 +967,6 @@ extern "C" {
    ) -> *const rb_callable_method_entry_t;
}
#[repr(C)]
pub struct rb_iv_index_tbl_entry {
    pub index: u32,
    pub class_serial: rb_serial_t,
    pub class_value: VALUE,
}
#[repr(C)]
pub struct rb_cvar_class_tbl_entry {
    pub index: u32,
    pub global_cvar_state: rb_serial_t,