Initialize shape attr index also in non-markable CC

Nobuyoshi Nakada 2022-10-12 19:38:29 +09:00 committed by Aaron Patterson
Parent 80da7250c5
Commit b55e3b842a
2 changed files with 14 additions and 24 deletions
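
Context for the change (editorial summary, not part of the commit message): a call cache's attr entry packs a shape ID and an attribute index into a single word so that vm_cc_atomic_shape_and_index can fetch both with one 64-bit read. Call sites used to skip non-markable CCs entirely, leaving their attr entry untouched; this commit moves that markability check into vm_cc_attr_index_set, which now records INVALID_SHAPE_ID for non-markable CCs, and makes vm_cc_attr_index_initialize delegate to it. A minimal standalone sketch of the packing scheme, assuming a 64-bit uintptr_t; SHAPE_FLAG_SHIFT, SHAPE_FLAG_MASK, INVALID_SHAPE_ID, and ATTR_INDEX_NOT_SET are placeholder definitions here, not Ruby's real ones:

/* Sketch only: placeholder constants, not ruby/ruby's definitions. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

#define SHAPE_FLAG_SHIFT   32                       /* assumed: shape ID in the high half */
#define SHAPE_FLAG_MASK    ((uintptr_t)0xffffffff)  /* assumed: index+1 in the low half   */
#define INVALID_SHAPE_ID   ((shape_id_t)-1)         /* assumed sentinel                   */
#define ATTR_INDEX_NOT_SET ((attr_index_t)-1)       /* assumed sentinel                   */

/* Pack as vm_cc_attr_index_set does: the index is stored off by one so
   that "index 0 is cached" and "nothing cached" stay distinguishable. */
static uintptr_t
pack(attr_index_t index, shape_id_t dest_shape_id)
{
    return (index + 1) | ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT);
}

/* Unpack as vm_cc_atomic_shape_and_index does, from one word. */
static void
unpack(uintptr_t cache_value, shape_id_t *shape_id, attr_index_t *index)
{
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
}

int
main(void)
{
    shape_id_t shape;
    attr_index_t index;

    /* vm_cc_attr_index_initialize(cc, shape_id) now stores index -1,
       which packs to 0 in the low half and reads back as "not set". */
    unpack(pack((attr_index_t)-1, 42), &shape, &index);
    assert(shape == 42 && index == ATTR_INDEX_NOT_SET);

    /* The non-markable path stores INVALID_SHAPE_ID with no index bits,
       so a later read can never match a live shape: a safe cache miss. */
    unpack(pack((attr_index_t)-1, INVALID_SHAPE_ID), &shape, &index);
    assert(shape == INVALID_SHAPE_ID && index == ATTR_INDEX_NOT_SET);
    return 0;
}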

View file

@@ -301,12 +301,12 @@ extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
 #define vm_cc_empty() rb_vm_empty_cc()
 
+static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);
+
 static inline void
 vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
 {
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc != vm_cc_empty());
-    *(uintptr_t *)&cc->aux_.attr.value = (uintptr_t)(shape_id) << SHAPE_FLAG_SHIFT;
+    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
 }
 
 static inline const struct rb_callcache *
@@ -385,7 +385,6 @@ vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
 static inline void
 vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t * shape_id, attr_index_t * index)
 {
-    VM_ASSERT(vm_cc_markable(cc));
     uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
     *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
     *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
@@ -451,9 +450,14 @@ vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
 static inline void
 vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
 {
+    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
+    if (!vm_cc_markable(cc)) {
+        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
+        return;
+    }
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc != vm_cc_empty());
-    *(uintptr_t *)&cc->aux_.attr.value = (index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
+    *attr_value = (index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
 }
 
 static inline void
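
Why initialization passes (attr_index_t)-1 (editorial note, using the placeholder definitions from the sketch near the top): vm_cc_attr_index_set stores index + 1, so -1 wraps to 0 in the index bits, and the decode in vm_cc_atomic_shape_and_index subtracts 1 again, yielding the "not set" sentinel while the shape ID round-trips intact:

/* Fragment in terms of the earlier placeholder sketch. */
shape_id_t shape_id = 42;               /* any shape ID            */
attr_index_t index = (attr_index_t)-1;  /* "no index": 0xffffffff  */

uintptr_t stored = (index + 1)          /* unsigned wrap: 0        */
                 | ((uintptr_t)shape_id << SHAPE_FLAG_SHIFT);

/* Reading back, as vm_cc_atomic_shape_and_index does:
     (attr_index_t)(stored & SHAPE_FLAG_MASK) - 1
       = 0 - 1 = 0xffffffff = ATTR_INDEX_NOT_SET   (no index cached)
     (shape_id_t)(stored >> SHAPE_FLAG_SHIFT) = 42 (shape ID preserved) */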

View file

@@ -1096,10 +1096,8 @@ static inline void
 fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
 {
     if (is_attr) {
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_set(cc, index, shape_id);
-        }
+        vm_cc_attr_index_set(cc, index, shape_id);
     }
     else {
         vm_ic_attr_index_set(iseq, ic, index, shape_id);
     }
@@ -1161,14 +1159,8 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
     attr_index_t index;
 
     if (is_attr) {
-        if (vm_cc_markable(cc)) {
-            vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
-        }
-        else {
-            cached_id = INVALID_SHAPE_ID;
-            index = ATTR_INDEX_NOT_SET;
-        }
+        vm_cc_atomic_shape_and_index(cc, &cached_id, &index);
     }
     else {
         vm_ic_atomic_shape_and_index(ic, &cached_id, &index);
     }
@@ -1214,10 +1206,8 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
     }
     else {
         if (is_attr) {
-            if (vm_cc_markable(cc)) {
-                vm_cc_attr_index_initialize(cc, shape_id);
-            }
+            vm_cc_attr_index_initialize(cc, shape_id);
         }
         else {
             vm_ic_attr_index_initialize(ic, shape_id);
         }
@@ -1248,10 +1238,8 @@ populate_cache(attr_index_t index, shape_id_t next_shape_id, ID id, const rb_ise
 {
     // Cache population code
     if (is_attr) {
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_set(cc, index, next_shape_id);
-        }
+        vm_cc_attr_index_set(cc, index, next_shape_id);
     }
     else {
         vm_ic_attr_index_set(iseq, ic, index, next_shape_id);
     }
@@ -3927,9 +3915,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         CALLER_SETUP_ARG(cfp, calling, ci);
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
         rb_check_arity(calling->argc, 0, 0);
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
-        }
+        vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
         const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
         VM_CALL_METHOD_ATTR(v,
                             vm_call_ivar(ec, cfp, calling),
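
Net effect at the call sites (editorial summary): none of the sites above needs its own vm_cc_markable(cc) branch anymore, because the check now lives inside vm_cc_attr_index_set, which degrades to recording INVALID_SHAPE_ID. Schematically:

/* Before this commit: every caller guarded the helper itself.    */
if (vm_cc_markable(cc)) {
    vm_cc_attr_index_set(cc, index, shape_id);
}

/* After: callers are unconditional; the helper checks markability
   once and stores INVALID_SHAPE_ID for non-markable CCs, so the
   next lookup misses the cache instead of reading stale state.   */
vm_cc_attr_index_set(cc, index, shape_id);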