Streamline cached attr reader / writer indexes

This commit removes the need to increment and decrement the indexes
used by the vm_cc_attr_index getter and setter. It also introduces a
vm_cc_attr_index_p predicate function and a vm_cc_attr_index_initialize
function.
Jemma Issroff 2022-01-25 16:04:17 -05:00, committed by Aaron Patterson
Parent 3ce97a182f
Commit 1a180b7e18
2 changed files with 25 additions and 10 deletions
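
The effect of the change is that the "+1" adjustment and the "raw 0 means no cached index" sentinel now live inside the vm_cc_attr_index accessors rather than at every call site. A minimal standalone sketch of that encoding (the struct and function names here are illustrative stand-ins, not Ruby's actual struct rb_callcache API):

    /* Sketch of the "store index + 1" encoding this commit centralizes.
     * Names are illustrative; the real accessors operate on struct rb_callcache. */
    #include <assert.h>
    #include <stdbool.h>

    struct cache { unsigned int attr_index; };  /* stand-in for cc->aux_.attr_index */

    static void cache_attr_index_initialize(struct cache *c) { c->attr_index = 0; }           /* raw 0 == "no index cached" */
    static void cache_attr_index_set(struct cache *c, int index) { c->attr_index = index + 1; }
    static bool cache_attr_index_p(const struct cache *c) { return c->attr_index > 0; }
    static unsigned int cache_attr_index(const struct cache *c) { return c->attr_index - 1; }

    int main(void)
    {
        struct cache c;
        cache_attr_index_initialize(&c);
        assert(!cache_attr_index_p(&c));       /* nothing cached yet */

        cache_attr_index_set(&c, 0);           /* logical index 0 is stored as raw 1, */
        assert(cache_attr_index_p(&c));        /* so it stays distinguishable from "unset" */
        assert(cache_attr_index(&c) == 0);     /* the getter undoes the +1 */
        return 0;
    }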

View file

@@ -353,7 +353,14 @@ static inline unsigned int
 vm_cc_attr_index(const struct rb_callcache *cc)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
-    return cc->aux_.attr_index;
+    return cc->aux_.attr_index - 1;
+}
+
+static inline bool
+vm_cc_attr_index_p(const struct rb_callcache *cc)
+{
+    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+    return cc->aux_.attr_index > 0;
 }
 
 static inline unsigned int
@@ -406,7 +413,15 @@ vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc != vm_cc_empty());
-    *(int *)&cc->aux_.attr_index = index;
+    *(int *)&cc->aux_.attr_index = index + 1;
+}
+
+static inline void
+vm_cc_attr_index_initialize(const struct rb_callcache *cc)
+{
+    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+    VM_ASSERT(cc != vm_cc_empty());
+    *(int *)&cc->aux_.attr_index = 0;
 }
 
 static inline void
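
With the adjustment folded into these accessors, the call sites in the second file below can pass and read the logical attribute index directly, and test whether an index has been cached with vm_cc_attr_index_p instead of comparing the raw field against 0 themselves.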

View file

@@ -1108,7 +1108,7 @@ fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, in
         RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
     }
     else {
-        vm_cc_attr_index_set(cc, (int)ent->index + 1);
+        vm_cc_attr_index_set(cc, ent->index);
     }
 }
 
@@ -1123,10 +1123,10 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
         // frozen?
     }
     else if (LIKELY(is_attr ?
-                    RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index(cc) > 0) :
+                    RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index_p(cc)) :
                     RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
                                                 ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
-        uint32_t index = !is_attr ? ic->entry->index : (vm_cc_attr_index(cc) - 1);
+        uint32_t index = !is_attr ? ic->entry->index: (vm_cc_attr_index(cc));
 
         RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
 
@@ -1215,7 +1215,7 @@ vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic,
             rb_raise(rb_eArgError, "too many instance variables");
         }
         else {
-            vm_cc_attr_index_set(cc, (int)(ent->index + 1));
+            vm_cc_attr_index_set(cc, (int)(ent->index));
        }
 
         uint32_t index = ent->index;
@@ -1258,8 +1258,8 @@ vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const str
     if (LIKELY(
         (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
-        ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index(cc) > 0)))) {
-        uint32_t index = !is_attr ? ic->entry->index : vm_cc_attr_index(cc)-1;
+        ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index_p(cc))))) {
+        uint32_t index = !is_attr ? ic->entry->index : vm_cc_attr_index(cc);
 
         if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
             rb_init_iv_list(obj);
         }
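
In the vm_getivar and vm_setivar fast paths above, the attribute-call branch now asks vm_cc_attr_index_p(cc) whether an index has been cached and uses vm_cc_attr_index(cc) unchanged, where it previously compared the raw value against 0 and subtracted one.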
@@ -3643,7 +3643,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
 
         rb_check_arity(calling->argc, 1, 1);
-        vm_cc_attr_index_set(cc, 0);
+        vm_cc_attr_index_initialize(cc);
         const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
         VM_CALL_METHOD_ATTR(v,
                             vm_call_attrset(ec, cfp, calling),
@@ -3654,7 +3654,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         CALLER_SETUP_ARG(cfp, calling, ci);
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
         rb_check_arity(calling->argc, 0, 0);
-        vm_cc_attr_index_set(cc, 0);
+        vm_cc_attr_index_initialize(cc);
         const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
         VM_CALL_METHOD_ATTR(v,
                             vm_call_ivar(ec, cfp, calling),
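
The two vm_call_method_each_type sites switch from vm_cc_attr_index_set(cc, 0) to vm_cc_attr_index_initialize(cc) because, under the new encoding, passing 0 to the setter would store a raw 1 and mean "attribute index 0 is cached"; clearing the cache now goes through the dedicated initializer, which writes the raw 0 sentinel.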