Introduce disposable call-cache.

This patch contains several ideas:

(1) Disposable inline method cache (IMC) for race-free inline method caching
    * Make the call cache (CC) an RVALUE (a GC-managed object) and allocate
      a new CC on every cache miss.
    * Because a CC is never mutated in place, this allows race-free access
      from parallel processing elements, similar to RCU.
(2) Introduce a per-class method cache (pCMC)
    * Instead of the fixed-size global method cache (GMC), the pCMC allows a
      flexible cache size.
    * Caching CCs reduces CC allocations and allows sharing a CC's fast path
      among call sites with the same call info (CI).
(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
    * Instead of using class serials, we set an "invalidated" flag on the
      method entry itself to represent cache invalidation.
    * Compared with class serials, the impact of a method modification
      (add/overwrite/delete) is small.
    * Updating a class serial invalidates all method caches of the class and
      its subclasses.
    * The proposed approach invalidates only the caches of the single
      affected ME (see the sketch after this message).

See [Feature #16614] for more details.
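
Below is a minimal sketch (not part of the patch) of how ideas (1) and (3)
fit together. The names loosely mirror struct rb_callcache, vm_cc_valid_p()
and vm_cc_invalidate() introduced in vm_callinfo.h further down; all types
here are simplified stand-ins rather than the real CRuby definitions.

    // Illustrative sketch only -- not the actual CRuby code.
    #include <stdbool.h>

    typedef unsigned long VALUE;

    struct method_entry {               // stands in for rb_callable_method_entry_t
        bool invalidated;               // the METHOD_ENTRY_INVALIDATED flag
    };

    struct callcache {                  // stands in for struct rb_callcache (an imemo)
        VALUE klass;                    // inline-cache key; 0 once invalidated
        const struct method_entry *cme; // cached method entry
    };

    // Mirrors vm_cc_valid_p(): a hit needs a matching receiver class and a
    // method entry that has not been invalidated.
    static bool cc_valid_p(const struct callcache *cc, VALUE recv_klass)
    {
        return cc->klass == recv_klass && !cc->cme->invalidated;
    }

    // Invalidation never rewrites a live call site in place.  Redefining a
    // method sets the ME flag (and vm_cc_invalidate() clears cc->klass); the
    // next call simply misses and allocates a brand-new CC via vm_cc_new(),
    // so readers racing on the old CC never observe a torn update.
    static void invalidate(struct method_entry *me, struct callcache *cc)
    {
        me->invalidated = true;
        cc->klass = 0;
    }

Idea (2) is the cc_tbl added to rb_classext_struct below: each class maps a
method ID to an rb_class_cc_entries list of (ci, cc) pairs, so call sites
that share the same CI can reuse one CC and its fast path instead of hitting
the fixed-size global table.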
Author: Koichi Sasada, 2020-01-08 16:14:01 +09:00
Parent: f2286925f0
Commit: b9007b6c54
32 changed files with 1733 additions and 997 deletions

class.c | 45

@ -894,12 +894,21 @@ add_refined_method_entry_i(ID key, VALUE value, void *data)
static void ensure_origin(VALUE klass);
static enum rb_id_table_iterator_result
clear_module_cache_i(ID id, VALUE val, void *data)
{
VALUE klass = (VALUE)data;
rb_clear_method_cache(klass, id);
return ID_TABLE_CONTINUE;
}
static int
include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
{
VALUE p, iclass;
int method_changed = 0, constant_changed = 0;
struct rb_id_table *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));
VALUE original_klass = klass;
if (FL_TEST(module, RCLASS_REFINED_BY_ANY)) {
ensure_origin(module);
@ -912,7 +921,7 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
if (klass_m_tbl && klass_m_tbl == RCLASS_M_TBL(module))
return -1;
/* ignore if the module included already in superclasses */
for (p = RCLASS_SUPER(klass); p; p = RCLASS_SUPER(p)) {
int type = BUILTIN_TYPE(p);
if (type == T_ICLASS) {
if (RCLASS_M_TBL(p) == RCLASS_M_TBL(module)) {
@ -924,37 +933,53 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
}
else if (type == T_CLASS) {
if (!search_super) break;
superclass_seen = TRUE;
}
}
iclass = rb_include_class_new(module, RCLASS_SUPER(c));
VALUE super_class = RCLASS_SUPER(c);
// invalidate inline method cache
tbl = RMODULE_M_TBL(module);
if (tbl && rb_id_table_size(tbl)) {
if (search_super) { // include
if (super_class && !RB_TYPE_P(super_class, T_MODULE)) {
rb_id_table_foreach(tbl, clear_module_cache_i, (void *)super_class);
}
}
else { // prepend
if (!RB_TYPE_P(original_klass, T_MODULE)) {
rb_id_table_foreach(tbl, clear_module_cache_i, (void *)original_klass);
}
}
method_changed = 1;
}
// setup T_ICLASS for the include/prepend module
iclass = rb_include_class_new(module, super_class);
c = RCLASS_SET_SUPER(c, iclass);
RCLASS_SET_INCLUDER(iclass, klass);
{
VALUE m = module;
if (BUILTIN_TYPE(m) == T_ICLASS) m = RBASIC(m)->klass;
rb_module_add_to_subclasses_list(m, iclass);
}
if (FL_TEST(klass, RMODULE_IS_REFINEMENT)) {
VALUE refined_class =
rb_refinement_module_get_refined_class(klass);
rb_id_table_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i, (void *)refined_class);
FL_SET(c, RMODULE_INCLUDED_INTO_REFINEMENT);
}
tbl = RMODULE_M_TBL(module);
if (tbl && rb_id_table_size(tbl)) method_changed = 1;
tbl = RMODULE_CONST_TBL(module);
if (tbl && rb_id_table_size(tbl)) constant_changed = 1;
skip:
module = RCLASS_SUPER(module);
}
if (method_changed) rb_clear_method_cache_by_class(klass);
if (constant_changed) rb_clear_constant_cache();
return method_changed;


@ -2946,6 +2946,7 @@ mjit.$(OBJEXT): {$(VPATH)}thread.h
mjit.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
mjit.$(OBJEXT): {$(VPATH)}thread_native.h
mjit.$(OBJEXT): {$(VPATH)}util.h
mjit.$(OBJEXT): {$(VPATH)}vm_callinfo.h
mjit.$(OBJEXT): {$(VPATH)}vm_core.h
mjit.$(OBJEXT): {$(VPATH)}vm_opts.h
mjit_compile.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h


@ -566,6 +566,8 @@ static void
verify_call_cache(rb_iseq_t *iseq)
{
#if CPDEBUG
// fprintf(stderr, "ci_size:%d\t", iseq->body->ci_size); rp(iseq);
VALUE *original = rb_iseq_original_iseq(iseq);
size_t i = 0;
while (i < iseq->body->iseq_size) {
@ -574,16 +576,27 @@ verify_call_cache(rb_iseq_t *iseq)
for (int j=0; types[j]; j++) {
if (types[j] == TS_CALLDATA) {
struct rb_call_cache cc;
struct rb_call_data *cd = (struct rb_call_data *)original[i+j+1];
MEMZERO(&cc, cc, 1);
if (memcmp(&cc, &cd->cc, sizeof(cc))) {
rb_bug("call cache not zero for fresh iseq");
const struct rb_callinfo *ci = cd->ci;
const struct rb_callcache *cc = cd->cc;
if (cc != vm_cc_empty()) {
vm_ci_dump(ci);
rb_bug("call cache is not initialized by vm_cc_empty()");
}
}
}
i += insn_len(insn);
}
for (unsigned int i=0; i<iseq->body->ci_size; i++) {
struct rb_call_data *cd = &iseq->body->call_data[i];
const struct rb_callinfo *ci = cd->ci;
const struct rb_callcache *cc = cd->cc;
if (cc != NULL && cc != vm_cc_empty()) {
vm_ci_dump(ci);
rb_bug("call cache is not initialized by vm_cc_empty()");
}
}
#endif
}
@ -661,7 +674,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
DECL_ANCHOR(ret);
INIT_ANCHOR(ret);
if (imemo_type_p((VALUE)node, imemo_ifunc)) {
if (IMEMO_TYPE_P(node, imemo_ifunc)) {
rb_raise(rb_eArgError, "unexpected imemo_ifunc");
}
@ -1212,6 +1225,7 @@ new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_cal
argc += kw_arg->keyword_len;
}
// fprintf(stderr, "[%d] id:%s\t", (int)iseq->body->ci_size, rb_id2name(mid)); rp(iseq);
iseq->body->ci_size++;
const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, kw_arg);
RB_OBJ_WRITTEN(iseq, Qundef, ci);
@ -2223,6 +2237,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
struct rb_call_data *cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++];
assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size);
cd->ci = source_ci;
cd->cc = vm_cc_empty();
generated_iseq[code_index + 1 + j] = (VALUE)cd;
break;
}
@ -10301,16 +10316,18 @@ ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
}
/* note that we dump out rb_call_info but load back rb_call_data */
static struct rb_call_data *
static void
ibf_load_ci_entries(const struct ibf_load *load,
ibf_offset_t ci_entries_offset,
unsigned int ci_size)
unsigned int ci_size,
struct rb_call_data **cd_ptr)
{
ibf_offset_t reading_pos = ci_entries_offset;
unsigned int i;
struct rb_call_data *cds = ZALLOC_N(struct rb_call_data, ci_size);
*cd_ptr = cds;
for (i = 0; i < ci_size; i++) {
VALUE mid_index = ibf_load_small_value(load, &reading_pos);
@ -10331,10 +10348,9 @@ ibf_load_ci_entries(const struct ibf_load *load,
cds[i].ci = vm_ci_new(mid, flag, argc, kwarg);
RB_OBJ_WRITTEN(load->iseq, Qundef, cds[i].ci);
cds[i].cc = vm_cc_empty();
}
return cds;
}
}
static ibf_offset_t
ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
@ -10588,7 +10604,7 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
load_body->catch_except_p = catch_except_p;
load_body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, is_size);
load_body->call_data = ibf_load_ci_entries(load, ci_entries_offset, ci_size);
ibf_load_ci_entries(load, ci_entries_offset, ci_size, &load_body->call_data);
load_body->param.opt_table = ibf_load_param_opt_table(load, param_opt_table_offset, param_opt_num);
load_body->param.keyword = ibf_load_param_keyword(load, param_keyword_offset);
load_body->param.flags.has_kw = (param_flags >> 4) & 1;


@ -14,46 +14,45 @@
#ifdef RB_DEBUG_COUNTER
/*
* method cache (mc) counts.
*
* * mc_inline_hit/miss: inline mc hit/miss counts (VM send insn)
* * mc_global_hit/miss: global method cache hit/miss counts
* two types: (1) inline cache miss (VM send insn)
* (2) called from C (rb_funcall).
* * mc_global_state_miss: inline mc miss by global_state miss.
* * mc_class_serial_miss: ... by mc_class_serial_miss
* * mc_cme_complement: callable_method_entry complement counts.
* * mc_cme_complement_hit: callable_method_entry cache hit counts.
* * mc_search_super: search_method() call counts.
* * mc_miss_by_nome: inline mc miss by no ment.
* * mc_miss_by_distinct: ... by distinct ment.
* * mc_miss_by_refine: ... by ment being refined.
* * mc_miss_by_visi: ... by visibility change.
* * mc_miss_spurious: spurious inline mc misshit.
* * mc_miss_reuse_call: count of reuse of cc->call.
*/
RB_DEBUG_COUNTER(mc_inline_hit)
RB_DEBUG_COUNTER(mc_inline_miss)
RB_DEBUG_COUNTER(mc_global_hit)
RB_DEBUG_COUNTER(mc_global_miss)
RB_DEBUG_COUNTER(mc_global_state_miss)
RB_DEBUG_COUNTER(mc_class_serial_miss)
RB_DEBUG_COUNTER(mc_cme_complement)
RB_DEBUG_COUNTER(mc_cme_complement_hit)
RB_DEBUG_COUNTER(mc_search_super)
RB_DEBUG_COUNTER(mc_miss_by_nome)
RB_DEBUG_COUNTER(mc_miss_by_distinct)
RB_DEBUG_COUNTER(mc_miss_by_refine)
RB_DEBUG_COUNTER(mc_miss_by_visi)
RB_DEBUG_COUNTER(mc_miss_spurious)
RB_DEBUG_COUNTER(mc_miss_reuse_call)
// method cache (IMC: inline method cache)
RB_DEBUG_COUNTER(mc_inline_hit) // IMC hit
RB_DEBUG_COUNTER(mc_inline_miss_klass) // IMC miss by different class
RB_DEBUG_COUNTER(mc_inline_miss_invalidated) // IMC miss by invalidated ME
RB_DEBUG_COUNTER(mc_cme_complement) // number of acquiring complement CME
RB_DEBUG_COUNTER(mc_cme_complement_hit) // number of cache hits for complemented CME
RB_DEBUG_COUNTER(mc_search) // count for method lookup in class tree
RB_DEBUG_COUNTER(mc_search_notfound) // method lookup, but not found
RB_DEBUG_COUNTER(mc_search_super) // total traversed classes
// callinfo
RB_DEBUG_COUNTER(ci_packed)
RB_DEBUG_COUNTER(ci_kw)
RB_DEBUG_COUNTER(ci_nokw)
RB_DEBUG_COUNTER(ci_runtime)
RB_DEBUG_COUNTER(ci_packed) // number of packed CI
RB_DEBUG_COUNTER(ci_kw) // non-packed CI w/ keywords
RB_DEBUG_COUNTER(ci_nokw) // non-packed CI w/o keywords
RB_DEBUG_COUNTER(ci_runtime) // creating temporary CI
// callcache
RB_DEBUG_COUNTER(cc_new) // number of CC
RB_DEBUG_COUNTER(cc_temp) // dummy CC (stack-allocated)
RB_DEBUG_COUNTER(cc_found_ccs) // count for CC lookup success in CCS
RB_DEBUG_COUNTER(cc_ent_invalidate) // count for invalidating cc (cc->klass = 0)
RB_DEBUG_COUNTER(cc_cme_invalidate) // count for invalidating CME
RB_DEBUG_COUNTER(cc_invalidate_leaf) // count for invalidating klass if klass has no subclasses
RB_DEBUG_COUNTER(cc_invalidate_leaf_ccs) // corresponding CCS
RB_DEBUG_COUNTER(cc_invalidate_leaf_callable) // complemented cache (no subclasses)
RB_DEBUG_COUNTER(cc_invalidate_tree) // count for invalidating klass if klass has subclasses
RB_DEBUG_COUNTER(cc_invalidate_tree_cme) // CMEs invalidated when found in this class or superclasses
RB_DEBUG_COUNTER(cc_invalidate_tree_callable) // complemented cache (subclasses)
RB_DEBUG_COUNTER(ccs_free) // count for freeing ccs
RB_DEBUG_COUNTER(ccs_maxlen) // maximum length of ccs
RB_DEBUG_COUNTER(ccs_found) // count for finding corresponding ccs on method lookup
// iseq
RB_DEBUG_COUNTER(iseq_num) // number of total created iseq
RB_DEBUG_COUNTER(iseq_cd_num) // number of total created cd (call_data)
/*
* call cache fastpath usage
@ -289,6 +288,7 @@ RB_DEBUG_COUNTER(obj_imemo_ifunc)
RB_DEBUG_COUNTER(obj_imemo_memo)
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
RB_DEBUG_COUNTER(obj_imemo_callinfo)
RB_DEBUG_COUNTER(obj_imemo_callcache)
/* ar_table */
RB_DEBUG_COUNTER(artable_hint_hit)
@ -375,17 +375,33 @@ rb_debug_counter_add(enum rb_debug_counter_type type, int add, int cond)
return cond;
}
inline static int
rb_debug_counter_max(enum rb_debug_counter_type type, unsigned int num)
{
if (rb_debug_counter[(int)type] < num) {
rb_debug_counter[(int)type] = num;
return 1;
}
else {
return 0;
}
}
VALUE rb_debug_counter_reset(VALUE klass);
VALUE rb_debug_counter_show(VALUE klass);
#define RB_DEBUG_COUNTER_INC(type) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, 1)
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !(cond)))
#define RB_DEBUG_COUNTER_INC_IF(type, cond) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, (cond))
#define RB_DEBUG_COUNTER_ADD(type, num) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, (num), 1)
#define RB_DEBUG_COUNTER_SETMAX(type, num) rb_debug_counter_max(RB_DEBUG_COUNTER_##type, (unsigned int)(num))
#else
#define RB_DEBUG_COUNTER_INC(type) ((void)0)
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (cond)
#define RB_DEBUG_COUNTER_INC_IF(type, cond) (cond)
#define RB_DEBUG_COUNTER_ADD(type, num) ((void)0)
#define RB_DEBUG_COUNTER_SETMAX(type, num) 0
#endif
void rb_debug_counter_show_results(const char *msg);

eval.c | 2

@ -1476,7 +1476,7 @@ rb_using_module(const rb_cref_t *cref, VALUE module)
{
Check_Type(module, T_MODULE);
using_module_recursive(cref, module);
rb_clear_method_cache_by_class(rb_cObject);
rb_clear_method_cache_all();
}
/*! \private */


@ -638,6 +638,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
imemo_type_ids[9] = rb_intern("imemo_ast");
imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
imemo_type_ids[11] = rb_intern("imemo_callinfo");
imemo_type_ids[12] = rb_intern("imemo_callcache");
}
rb_objspace_each_objects(count_imemo_objects_i, (void *)hash);

gc.c | 204

@ -2530,6 +2530,116 @@ rb_free_const_table(struct rb_id_table *tbl)
rb_id_table_free(tbl);
}
// alive: if false, the target pointers may already have been freed.
// To check that, we need the objspace parameter.
static void
vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
{
if (ccs->entries) {
for (int i=0; i<ccs->len; i++) {
const struct rb_callcache *cc = ccs->entries[i].cc;
if (!alive) {
// ccs can be free'ed.
if (is_pointer_to_heap(objspace, (void *)cc) &&
IMEMO_TYPE_P(cc, imemo_callcache) &&
cc->klass == klass) {
// OK. maybe target cc.
}
else {
continue;
}
}
vm_cc_invalidate(cc);
}
ruby_xfree(ccs->entries);
}
ruby_xfree(ccs);
}
void
rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
{
RB_DEBUG_COUNTER_INC(ccs_free);
vm_ccs_free(ccs, TRUE, NULL, Qundef);
}
struct cc_tbl_i_data {
rb_objspace_t *objspace;
VALUE klass;
bool alive;
};
static enum rb_id_table_iterator_result
cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
{
struct cc_tbl_i_data *data = data_ptr;
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
VM_ASSERT(vm_ccs_p(ccs));
VM_ASSERT(id == ccs->cme->called_id);
if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
rb_vm_ccs_free(ccs);
return ID_TABLE_DELETE;
}
else {
gc_mark(data->objspace, (VALUE)ccs->cme);
for (int i=0; i<ccs->len; i++) {
VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
VM_ASSERT(ccs->cme == vm_cc_cme(ccs->entries[i].cc));
gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
}
return ID_TABLE_CONTINUE;
}
}
static void
cc_table_mark(rb_objspace_t *objspace, VALUE klass)
{
struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
if (cc_tbl) {
struct cc_tbl_i_data data = {
.objspace = objspace,
.klass = klass,
};
rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
}
}
static enum rb_id_table_iterator_result
cc_table_free_i(ID id, VALUE ccs_ptr, void *data_ptr)
{
struct cc_tbl_i_data *data = data_ptr;
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
VM_ASSERT(vm_ccs_p(ccs));
vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
return ID_TABLE_CONTINUE;
}
static void
cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
{
struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
if (cc_tbl) {
struct cc_tbl_i_data data = {
.objspace = objspace,
.klass = klass,
.alive = alive,
};
rb_id_table_foreach(cc_tbl, cc_table_free_i, &data);
rb_id_table_free(cc_tbl);
}
}
void
rb_cc_table_free(VALUE klass)
{
cc_table_free(&rb_objspace, klass, TRUE);
}
static inline void
make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
{
@ -2621,6 +2731,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
case T_CLASS:
mjit_remove_class_serial(RCLASS_SERIAL(obj));
rb_id_table_free(RCLASS_M_TBL(obj));
cc_table_free(objspace, obj, FALSE);
if (RCLASS_IV_TBL(obj)) {
st_free_table(RCLASS_IV_TBL(obj));
}
@ -2805,6 +2916,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
rb_class_detach_subclasses(obj);
RCLASS_EXT(obj)->subclasses = NULL;
}
cc_table_free(objspace, obj, FALSE);
rb_class_remove_from_module_subclasses(obj);
rb_class_remove_from_super_subclasses(obj);
xfree(RANY(obj)->as.klass.ptr);
@ -2896,6 +3008,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
case imemo_callinfo:
RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
break;
case imemo_callcache:
RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
break;
default:
/* unreachable */
break;
@ -5335,6 +5450,13 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
return;
case imemo_callinfo:
return;
case imemo_callcache:
{
const struct rb_callcache *cc = (const struct rb_callcache *)obj;
// should not mark klass here
gc_mark(objspace, (VALUE)vm_cc_cme(cc));
}
return;
#if VM_CHECK_MODE > 0
default:
VM_UNREACHABLE(gc_mark_imemo);
@ -5383,7 +5505,9 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
gc_mark(objspace, RCLASS_SUPER(obj));
}
if (!RCLASS_EXT(obj)) break;
mark_m_tbl(objspace, RCLASS_M_TBL(obj));
cc_table_mark(objspace, obj);
mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
break;
@ -5397,6 +5521,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
}
if (!RCLASS_EXT(obj)) break;
mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
cc_table_mark(objspace, obj);
break;
case T_ARRAY:
@ -8126,6 +8251,13 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
case imemo_ast:
rb_ast_update_references((rb_ast_t *)obj);
break;
case imemo_callcache:
{
const struct rb_callcache *cc = (const struct rb_callcache *)obj;
UPDATE_IF_MOVED(objspace, cc->klass);
TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
}
break;
case imemo_parser_strterm:
case imemo_tmpbuf:
case imemo_callinfo:
@ -8201,6 +8333,39 @@ update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
}
}
static enum rb_id_table_iterator_result
update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
{
rb_objspace_t *objspace = (rb_objspace_t *)data;
struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
VM_ASSERT(vm_ccs_p(ccs));
if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
}
for (int i=0; i<ccs->len; i++) {
if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
}
if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
}
}
// do not replace
return ID_TABLE_CONTINUE;
}
static void
update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
{
struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
if (tbl) {
rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, NULL, objspace);
}
}
static enum rb_id_table_iterator_result
update_const_table(VALUE value, void *data)
{
@ -8257,7 +8422,10 @@ gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
}
if (!RCLASS_EXT(obj)) break;
update_m_tbl(objspace, RCLASS_M_TBL(obj));
update_cc_tbl(objspace, obj);
gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
update_class_ext(objspace, RCLASS_EXT(obj));
update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
break;
@ -8275,6 +8443,7 @@ gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
}
update_class_ext(objspace, RCLASS_EXT(obj));
update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
update_cc_tbl(objspace, obj);
break;
case T_IMEMO:
@ -8607,7 +8776,6 @@ gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_doubl
gc_check_references_for_moved(objspace);
}
rb_clear_method_cache_by_class(rb_cObject);
rb_clear_constant_cache();
heap_eden->free_pages = NULL;
heap_eden->using_page = NULL;
@ -11550,6 +11718,9 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
if (!NIL_P(class_path)) {
APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
}
else {
APPENDF((BUFF_ARGS, "(annon)"));
}
break;
}
case T_ICLASS:
@ -11606,21 +11777,31 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
IMEMO_NAME(ast);
IMEMO_NAME(parser_strterm);
IMEMO_NAME(callinfo);
IMEMO_NAME(callcache);
#undef IMEMO_NAME
default: UNREACHABLE;
}
APPENDF((BUFF_ARGS, "/%s", imemo_name));
APPENDF((BUFF_ARGS, "<%s> ", imemo_name));
switch (imemo_type(obj)) {
case imemo_ment: {
const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
if (me->def) {
APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
rb_id2name(me->called_id),
METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
METHOD_ENTRY_CACHED(me) ? ",cc" : "",
METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
rb_method_type_name(me->def->type),
me->def->alias_count,
obj_info(me->owner),
obj_info(me->defined_class)));
me->def->alias_count,
(void *)me->owner, // obj_info(me->owner),
(void *)me->defined_class)); //obj_info(me->defined_class)));
if (me->def->type == VM_METHOD_TYPE_ISEQ) {
APPENDF((BUFF_ARGS, " (iseq:%p)", (void *)me->def->body.iseq.iseqptr));
}
}
else {
APPENDF((BUFF_ARGS, "%s", rb_id2name(me->called_id)));
@ -11642,6 +11823,17 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
vm_ci_kwarg(ci) ? "available" : "NULL"));
break;
}
case imemo_callcache:
{
const struct rb_callcache *cc = (const struct rb_callcache *)obj;
VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
APPENDF((BUFF_ARGS, "(klass:%s, cme:%s (%p) call:%p",
NIL_P(class_path) ? "??" : RSTRING_PTR(class_path),
vm_cc_cme(cc) ? rb_id2name(vm_cc_cme(cc)->called_id) : "<NULL>",
(void *)vm_cc_cme(cc), (void *)vm_cc_call(cc)));
break;
}
default:
break;
}


@ -229,7 +229,7 @@ rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
int index = hash_table_index(tbl, key);
if (index >= 0) {
*valp = tbl->items[index].val;
return TRUE;
}
else {


@ -827,7 +827,7 @@ opt_nil_p
(VALUE recv)
(VALUE val)
{
val = vm_opt_nil_p(cd, recv);
val = vm_opt_nil_p(GET_ISEQ(), cd, recv);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@ -903,8 +903,9 @@ invokeblock
// attr rb_snum_t sp_inc = sp_inc_of_invokeblock(cd->ci);
// attr rb_snum_t comptime_sp_inc = sp_inc_of_invokeblock(ci);
{
if (UNLIKELY(cd->cc.call != vm_invokeblock_i)) {
cd->cc.call = vm_invokeblock_i; // check before setting to avoid CoW
if (UNLIKELY(vm_cc_call(cd->cc) != vm_invokeblock_i)) {
const struct rb_callcache *cc = vm_cc_new(0, NULL, vm_invokeblock_i);
RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, cc);
}
VALUE bh = VM_BLOCK_HANDLER_NONE;
@ -1167,7 +1168,7 @@ opt_eq
(VALUE recv, VALUE obj)
(VALUE val)
{
val = opt_eq_func(recv, obj, cd);
val = opt_eq_func(GET_ISEQ(), recv, obj, cd);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@ -1181,7 +1182,7 @@ opt_neq
(VALUE recv, VALUE obj)
(VALUE val)
{
val = vm_opt_neq(cd, cd_eq, recv, obj);
val = vm_opt_neq(GET_ISEQ(), cd, cd_eq, recv, obj);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@ -1431,7 +1432,7 @@ opt_not
(VALUE recv)
(VALUE val)
{
val = vm_opt_not(cd, recv);
val = vm_opt_not(GET_ISEQ(), cd, recv);
if (val == Qundef) {
CALL_SIMPLE_METHOD();


@ -41,6 +41,7 @@ struct rb_classext_struct {
#endif
struct rb_id_table *const_tbl;
struct rb_id_table *callable_m_tbl;
struct rb_id_table *cc_tbl; /* ID -> [[ci, cc1], cc2, ...] */
struct rb_subclass_entry *subclasses;
struct rb_subclass_entry **parent_subclasses;
/**
@ -83,6 +84,7 @@ typedef struct rb_classext_struct rb_classext_t;
# define RCLASS_M_TBL(c) (RCLASS(c)->m_tbl)
#endif
#define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
#define RCLASS_CC_TBL(c) (RCLASS_EXT(c)->cc_tbl)
#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
#define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
#define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)


@ -29,6 +29,7 @@
#define IMEMO_FL_USER2 FL_USER6
#define IMEMO_FL_USER3 FL_USER7
#define IMEMO_FL_USER4 FL_USER8
#define IMEMO_FL_USER5 FL_USER9
enum imemo_type {
imemo_env = 0,
@ -43,6 +44,7 @@ enum imemo_type {
imemo_ast = 9,
imemo_parser_strterm = 10,
imemo_callinfo = 11,
imemo_callcache = 12,
};
/* CREF (Class REFerence) is defined in method.h */
@ -171,6 +173,8 @@ imemo_type_p(VALUE imemo, enum imemo_type imemo_type)
}
}
#define IMEMO_TYPE_P(v, t) imemo_type_p((VALUE)v, t)
static inline bool
imemo_throw_data_p(VALUE imemo)
{


@ -52,44 +52,6 @@ enum method_missing_reason {
MISSING_NONE = 0x40
};
struct rb_call_cache {
/* inline cache: keys */
rb_serial_t method_state;
rb_serial_t class_serial[
(CACHELINE
- sizeof(rb_serial_t) /* method_state */
- sizeof(struct rb_callable_method_entry_struct *) /* me */
- sizeof(uintptr_t) /* method_serial */
- sizeof(enum method_missing_reason) /* aux */
- sizeof(VALUE (*)( /* call */
struct rb_execution_context_struct *e,
struct rb_control_frame_struct *,
struct rb_calling_info *,
const struct rb_call_data *)))
/ sizeof(rb_serial_t)
];
/* inline cache: values */
const struct rb_callable_method_entry_struct *me;
uintptr_t method_serial; /* me->def->method_serial */
VALUE (*call)(struct rb_execution_context_struct *ec,
struct rb_control_frame_struct *cfp,
struct rb_calling_info *calling,
struct rb_call_data *cd);
union {
unsigned int index; /* used by ivar */
enum method_missing_reason method_missing_reason; /* used by method_missing */
} aux;
};
STATIC_ASSERT(cachelined, sizeof(struct rb_call_cache) <= CACHELINE);
struct rb_call_data {
const struct rb_callinfo *ci;
struct rb_call_cache cc;
};
/* vm_insnhelper.h */
rb_serial_t rb_next_class_serial(void);
@ -139,8 +101,9 @@ MJIT_SYMBOL_EXPORT_END
VALUE rb_equal_opt(VALUE obj1, VALUE obj2);
VALUE rb_eql_opt(VALUE obj1, VALUE obj2);
struct rb_iseq_struct;
MJIT_SYMBOL_EXPORT_BEGIN
void rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass);
void rb_vm_search_method_slowpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass);
MJIT_SYMBOL_EXPORT_END
/* vm_dump.c */

iseq.c | 17

@ -247,6 +247,7 @@ rb_iseq_update_references(rb_iseq_t *iseq)
if (!SPECIAL_CONST_P(cds[i].ci)) {
cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
}
cds[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)cds[i].cc);
}
}
if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
@ -323,6 +324,11 @@ rb_iseq_mark(const rb_iseq_t *iseq)
struct rb_call_data *cds = (struct rb_call_data *)body->call_data;
for (unsigned int i=0; i<body->ci_size; i++) {
rb_gc_mark_movable((VALUE)cds[i].ci);
const struct rb_callcache *cc = cds[i].cc;
if (cc && vm_cc_markable(cds[i].cc)) {
rb_gc_mark_movable((VALUE)cc);
// TODO: check enable
}
}
}
@ -351,6 +357,14 @@ rb_iseq_mark(const rb_iseq_t *iseq)
}
}
}
if (body->jit_unit && body->jit_unit->cc_entries != NULL) {
// TODO: move to mjit.c?
for (unsigned int i=0; i<body->ci_size; i++) {
const struct rb_callcache *cc = body->jit_unit->cc_entries[i];
rb_gc_mark((VALUE)cc); // pindown
}
}
}
if (FL_TEST_RAW(iseq, ISEQ_NOT_LOADED_YET)) {
@ -663,6 +677,9 @@ finish_iseq_build(rb_iseq_t *iseq)
rb_exc_raise(err);
}
RB_DEBUG_COUNTER_INC(iseq_num);
RB_DEBUG_COUNTER_ADD(iseq_cd_num, iseq->body->ci_size);
rb_iseq_init_trace(iseq);
return Qtrue;
}


@ -69,8 +69,12 @@ typedef struct rb_callable_method_entry_struct { /* same fields with rb_method_e
#define METHOD_ENTRY_VISI(me) (rb_method_visibility_t)(((me)->flags & (IMEMO_FL_USER0 | IMEMO_FL_USER1)) >> (IMEMO_FL_USHIFT+0))
#define METHOD_ENTRY_BASIC(me) (int) (((me)->flags & (IMEMO_FL_USER2 )) >> (IMEMO_FL_USHIFT+2))
#define METHOD_ENTRY_COMPLEMENTED(me) ((me)->flags & IMEMO_FL_USER3)
#define METHOD_ENTRY_COMPLEMENTED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER3)
#define METHOD_ENTRY_CACHED(me) ((me)->flags & IMEMO_FL_USER4)
#define METHOD_ENTRY_CACHED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER4)
#define METHOD_ENTRY_INVALIDATED(me) ((me)->flags & IMEMO_FL_USER5)
#define METHOD_ENTRY_INVALIDATED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER5)
static inline void
METHOD_ENTRY_VISI_SET(rb_method_entry_t *me, rb_method_visibility_t visi)
@ -229,4 +233,7 @@ void rb_scope_visibility_set(rb_method_visibility_t);
VALUE rb_unnamed_parameters(int arity);
void rb_clear_method_cache(VALUE klass_or_module, ID mid);
void rb_clear_method_cache_all(void);
#endif /* RUBY_METHOD_H */

mjit.c | 19

@ -25,6 +25,9 @@
#include "internal/warnings.h"
#include "mjit_worker.c"
#include "vm_callinfo.h"
static void create_unit(const rb_iseq_t *iseq);
// Copy ISeq's states so that race condition does not happen on compilation.
static void
@ -51,14 +54,18 @@ mjit_copy_job_handler(void *data)
}
const struct rb_iseq_constant_body *body = job->iseq->body;
if (job->cc_entries) {
unsigned int i;
struct rb_call_cache *sink = job->cc_entries;
const struct rb_call_data *calls = body->call_data;
for (i = 0; i < body->ci_size; i++) {
*sink++ = calls[i].cc;
unsigned int ci_size = body->ci_size;
if (ci_size > 0) {
const struct rb_callcache **cc_entries = ALLOC_N(const struct rb_callcache *, ci_size);
if (body->jit_unit == NULL) {
create_unit(job->iseq);
}
body->jit_unit->cc_entries = cc_entries;
for (unsigned int i=0; i<ci_size; i++) {
cc_entries[i] = body->call_data[i].cc;
}
}
if (job->is_entries) {
memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
}

mjit.h | 29

@ -70,6 +70,35 @@ struct rb_mjit_compile_info {
bool disable_inlining;
};
// The unit structure that holds metadata of ISeq for MJIT.
struct rb_mjit_unit {
// Unique order number of unit.
int id;
// Dlopen handle of the loaded object file.
void *handle;
rb_iseq_t *iseq;
#ifndef _MSC_VER
// This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
char *o_file;
// true if it's inherited from parent Ruby process and lazy deletion should be skipped.
// `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
// by child for `compact_all_jit_code`.
bool o_file_inherited_p;
#endif
#if defined(_WIN32)
// DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
char *so_file;
#endif
// Only used by unload_units. Flag to check this unit is currently on stack or not.
char used_code_p;
struct list_node unode;
// mjit_compile's optimization switches
struct rb_mjit_compile_info compile_info;
// captured CC values; they should be marked together with the iseq.
const struct rb_callcache **cc_entries; // size: iseq->body->ci_size
};
typedef VALUE (*mjit_func_t)(rb_execution_context_t *, rb_control_frame_t *);
RUBY_SYMBOL_EXPORT_BEGIN


@ -41,9 +41,9 @@ call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body)
// For propagating information needed for lazily pushing a frame.
struct inlined_call_context {
int orig_argc; // ci->orig_argc
VALUE me; // cc->me
int param_size; // def_iseq_ptr(cc->me->def)->body->param.size
int local_size; // def_iseq_ptr(cc->me->def)->body->local_table_size
VALUE me; // vm_cc_cme(cc)
int param_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->param.size
int local_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->local_table_size
};
// Storage to keep compiler's status. This should have information
@ -57,7 +57,6 @@ struct compile_status {
bool local_stack_p;
// Safely-accessible cache entries copied from main thread.
union iseq_inline_storage_entry *is_entries;
struct rb_call_cache *cc_entries;
// Mutated optimization levels
struct rb_mjit_compile_info *compile_info;
// If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.
@ -79,13 +78,11 @@ struct case_dispatch_var {
VALUE last_value;
};
// Returns true if call cache is still not obsoleted and cc->me->def->type is available.
// Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available.
static bool
has_valid_method_type(CALL_CACHE cc)
{
extern bool mjit_valid_class_serial_p(rb_serial_t class_serial);
return GET_GLOBAL_METHOD_STATE() == cc->method_state
&& mjit_valid_class_serial_p(cc->class_serial[0]) && cc->me;
return vm_cc_cme(cc) != NULL;
}
// Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition
@ -276,7 +273,8 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct
fprintf(f, " return Qundef;\n");
}
extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries);
extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq,
union iseq_inline_storage_entry *is_entries);
static bool
mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
@ -368,8 +366,6 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body)
.stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
.inlined_iseqs = compile_root_p ? \
alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
.cc_entries = body->ci_size > 0 ? \
alloca(sizeof(struct rb_call_cache) * body->ci_size) : NULL, \
.is_entries = (body->is_size > 0) ? \
alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
.compile_info = compile_root_p ? \
@ -394,17 +390,18 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
#else
int insn = (int)body->iseq_encoded[pos];
#endif
if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
const struct rb_callinfo *ci = cd->ci;
CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body); // use copy to avoid race condition
const struct rb_callcache *cc = iseq->body->jit_unit->cc_entries[call_data_index(cd, body)]; // use copy to avoid race condition
const rb_iseq_t *child_iseq;
if (has_valid_method_type(cc_copy) &&
!(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, child_iseq = def_iseq_ptr(cc_copy->me->def)) && // CC_SET_FASTPATH in vm_callee_setup_arg
inlinable_iseq_p(child_iseq->body)) {
if (has_valid_method_type(cc) &&
!(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ &&
fastpath_applied_iseq_p(ci, cc, child_iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) &&
// CC_SET_FASTPATH in vm_callee_setup_arg
inlinable_iseq_p(child_iseq->body)) {
status->inlined_iseqs[pos] = child_iseq->body;
if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job.
@ -418,12 +415,12 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
INIT_COMPILE_STATUS(child_status, child_iseq->body, false);
child_status.inline_context = (struct inlined_call_context){
.orig_argc = vm_ci_argc(ci),
.me = (VALUE)cc_copy->me,
.me = (VALUE)vm_cc_cme(cc),
.param_size = child_iseq->body->param.size,
.local_size = child_iseq->body->local_table_size
};
if ((child_status.cc_entries != NULL || child_status.is_entries != NULL)
&& !mjit_copy_cache_from_main_thread(child_iseq, child_status.cc_entries, child_status.is_entries))
if ((child_iseq->body->ci_size > 0 || child_status.is_entries != NULL)
&& !mjit_copy_cache_from_main_thread(child_iseq, child_status.is_entries))
return false;
fprintf(f, "ALWAYS_INLINE(static VALUE _mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", pos);
@ -454,9 +451,10 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
struct compile_status status;
INIT_COMPILE_STATUS(status, iseq->body, true);
if ((status.cc_entries != NULL || status.is_entries != NULL)
&& !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries))
if ((iseq->body->ci_size > 0 || status.is_entries != NULL)
&& !mjit_copy_cache_from_main_thread(iseq, status.is_entries)) {
return false;
}
if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
if (!precompile_inlinable_iseqs(f, iseq, &status))


@ -122,32 +122,6 @@ typedef intptr_t pid_t;
#define MJIT_TMP_PREFIX "_ruby_mjit_"
// The unit structure that holds metadata of ISeq for MJIT.
struct rb_mjit_unit {
// Unique order number of unit.
int id;
// Dlopen handle of the loaded object file.
void *handle;
rb_iseq_t *iseq;
#ifndef _MSC_VER
// This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
char *o_file;
// true if it's inherited from parent Ruby process and lazy deletion should be skipped.
// `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
// by child for `compact_all_jit_code`.
bool o_file_inherited_p;
#endif
#if defined(_WIN32)
// DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
char *so_file;
#endif
// Only used by unload_units. Flag to check this unit is currently on stack or not.
char used_code_p;
struct list_node unode;
// mjit_compile's optimization switches
struct rb_mjit_compile_info compile_info;
};
// Linked list of struct rb_mjit_unit.
struct rb_mjit_unit_list {
struct list_head head;
@ -1117,7 +1091,6 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
typedef struct {
const rb_iseq_t *iseq;
struct rb_call_cache *cc_entries;
union iseq_inline_storage_entry *is_entries;
bool finish_p;
} mjit_copy_job_t;
@ -1138,7 +1111,7 @@ int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
// We're lazily copying cache values from main thread because these cache values
// could be different between ones on enqueue timing and ones on dequeue timing.
bool
mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, union iseq_inline_storage_entry *is_entries)
{
mjit_copy_job_t *job = &mjit_copy_job; // just a short hand
@ -1146,7 +1119,6 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc
job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
job->cc_entries = cc_entries;
job->is_entries = is_entries;
CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");


@ -10,33 +10,25 @@ class TestTracepointObj < Test::Unit::TestCase
end
def test_tracks_objspace_events
result = Bug.tracepoint_track_objspace_events{
Object.new
}
object_new_newobj = result[0]
result = EnvUtil.suppress_warning {eval(<<-EOS, nil, __FILE__, __LINE__+1)}
Bug.tracepoint_track_objspace_events {
99
'abc'
_="foobar"
Object.new
nil
}
EOS
newobj_count, free_count, gc_start_count, gc_end_mark_count, gc_end_sweep_count, *newobjs = *result
assert_equal 1 + object_new_newobj, newobj_count
assert_equal 1 + object_new_newobj, newobjs.size
assert_equal 1, newobj_count
assert_equal 1, newobjs.size
assert_equal 'foobar', newobjs[0]
assert_equal Object, newobjs[1].class
assert_operator free_count, :>=, 0
assert_operator gc_start_count, :==, gc_end_mark_count
assert_operator gc_start_count, :>=, gc_end_sweep_count
end
def test_tracks_objspace_count
return
stat1 = {}
stat2 = {}
GC.disable


@ -94,6 +94,9 @@ class TestGc < Test::Unit::TestCase
GC.start
GC.stat(stat)
ObjectSpace.count_objects(count)
# repeat the same method invocations so that cache objects get created.
GC.stat(stat)
ObjectSpace.count_objects(count)
assert_equal(count[:TOTAL]-count[:FREE], stat[:heap_live_slots])
assert_equal(count[:FREE], stat[:heap_free_slots])


@ -0,0 +1,64 @@
# -*- coding: us-ascii -*-
# frozen_string_literal: true
require 'test/unit'
class TestMethod < Test::Unit::TestCase
def test_alias
m0 = Module.new do
def foo; :M0 end
end
m1 = Module.new do
include m0
end
c = Class.new do
include m1
alias bar foo
end
d = Class.new(c) do
end
test = -> do
d.new.bar
end
assert_equal :M0, test[]
c.class_eval do
def bar
:C
end
end
assert_equal :C, test[]
end
def test_zsuper
assert_separately [], <<-EOS
class C
private def foo
:C
end
end
class D < C
public :foo
end
class E < D; end
class F < E; end
test = -> do
F.new().foo
end
assert_equal :C, test[]
class E
def foo; :E; end
end
assert_equal :E, test[]
EOS
end
end


@ -24,7 +24,7 @@ static VALUE
#{fname(param, local)}(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
return vm_call_iseq_setup_normal(ec, cfp, calling, cd->cc.me, 0, #{param}, #{local});
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cd->cc), 0, #{param}, #{local});
}
EOS


@ -14,9 +14,9 @@
MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use copied cc to avoid race condition
CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body);
const struct rb_callcache *captured_cc = body->jit_unit->cc_entries[call_data_index(cd, body)];
%
if (!status->compile_info->disable_send_cache && has_valid_method_type(cc_copy)) {
if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc)) {
const rb_iseq_t *iseq;
const CALL_INFO ci = cd->ci;
unsigned int argc = vm_ci_argc(ci); // this `argc` variable is for calculating a value's position on stack considering `blockarg`.
@ -25,7 +25,10 @@
% end
if (!(vm_ci_flag(ci) & VM_CALL_TAILCALL) // inlining non-tailcall path
&& cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, iseq = def_iseq_ptr(cc_copy->me->def))) { // CC_SET_FASTPATH in vm_callee_setup_arg
&& vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_ISEQ
&& fastpath_applied_iseq_p(ci, captured_cc, iseq = def_iseq_ptr(vm_cc_cme(captured_cc)->def))) {
// CC_SET_FASTPATH in vm_callee_setup_arg
int param_size = iseq->body->param.size;
fprintf(f, "{\n");
@ -35,8 +38,10 @@
}
% # JIT: Invalidate call cache if it requires vm_search_method. This allows to inline some of following things.
fprintf(f, " if (UNLIKELY(GET_GLOBAL_METHOD_STATE() != %"PRI_SERIALT_PREFIX"u ||\n", cc_copy->method_state);
fprintf(f, " RCLASS_SERIAL(CLASS_OF(stack[%d])) != %"PRI_SERIALT_PREFIX"u)) {\n", b->stack_size - 1 - argc, cc_copy->class_serial[0]);
fprintf(f, " const struct rb_call_data *cd = (const struct rb_callcache *)0x%"PRIxVALUE";\n", (VALUE)cd);
fprintf(f, " const struct rb_callcache *cc = (const struct rb_callcache *)0x%"PRIxVALUE";\n", (VALUE)captured_cc);
fprintf(f, " if (UNLIKELY(cd->cc != cc || !vm_cc_valid_p(cc, CLASS_OF(stack[%d])))) {\n", b->stack_size - 1 - argc);
// TODO: need to free cc
fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", pos);
fprintf(f, " reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
fprintf(f, " goto send_cancel;\n");
@ -59,18 +64,18 @@
fprintf(f, " {\n");
fprintf(f, " struct rb_calling_info calling;\n");
% if insn.name == 'send'
fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (CALL_INFO)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)ci, (VALUE)blockiseq);
fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, cd->ci, (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)blockiseq);
% else
fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
% end
fprintf(f, " calling.argc = %d;\n", vm_ci_argc(ci));
fprintf(f, " calling.recv = stack[%d];\n", b->stack_size - 1 - argc);
% # JIT: Special CALL_METHOD. Bypass cc_copy->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
% # JIT: Special CALL_METHOD. Bypass captured_cc->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
fprintf(f, " {\n");
fprintf(f, " VALUE v;\n");
fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n",
(VALUE)cc_copy->me, param_size, iseq->body->local_table_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, vm_cc_cme(cc), 0, %d, %d);\n",
param_size, iseq->body->local_table_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
if (iseq->body->catch_except_p) {
fprintf(f, " VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
fprintf(f, " v = vm_exec(ec, TRUE);\n");


@ -57,7 +57,7 @@ switch (insn) {
% when *send_compatible_opt_insns
% # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated.
% cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' }
if (has_valid_method_type(status->cc_entries + call_data_index((CALL_DATA)operands[<%= cd_index %>], body))) {
if (has_valid_method_type(body->jit_unit->cc_entries[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
<%= render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%>
<%= render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%>
break;

vm.c | 26

@ -386,6 +386,8 @@ rb_serial_t ruby_vm_global_method_state = 1;
rb_serial_t ruby_vm_global_constant_state = 1;
rb_serial_t ruby_vm_class_serial = 1;
const struct rb_callcache *vm_empty_cc;
static void thread_free(void *ptr);
void
@ -2806,8 +2808,9 @@ static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
REWIND_CFP({
rb_undef(cbase, SYM2ID(sym));
rb_clear_method_cache_by_class(self);
ID mid = SYM2ID(sym);
rb_undef(cbase, mid);
rb_clear_method_cache(self, mid);
});
return Qnil;
}
@ -2962,6 +2965,13 @@ f_lambda(VALUE _)
return rb_block_lambda();
}
static VALUE
vm_mtbl(VALUE self, VALUE obj, VALUE sym)
{
vm_mtbl_dump(CLASS_OF(obj), SYM2ID(sym));
return Qnil;
}
void
Init_VM(void)
{
@ -3249,9 +3259,11 @@ Init_VM(void)
#if VMDEBUG
rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
#else
(void)sdr;
(void)nsdr;
(void)vm_mtbl;
#endif
/* VM bootstrap: phase 2 */
@ -3348,6 +3360,10 @@ Init_vm_objects(void)
vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
rb_objspace_gc_enable(vm->objspace);
vm_empty_cc = vm_cc_new(0, NULL, vm_call_general);
FL_SET_RAW(vm_empty_cc, VM_CALLCACHE_UNMARKABLE);
rb_gc_register_mark_object((VALUE)vm_empty_cc);
}
/* top self */
@ -3716,6 +3732,12 @@ vm_collect_usage_register(int reg, int isset)
}
#endif
MJIT_FUNC_EXPORTED const struct rb_callcache *
rb_vm_empty_cc(void)
{
return vm_empty_cc;
}
#endif /* #ifndef MJIT_HEADER */
#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */


@ -75,13 +75,13 @@ struct rb_callinfo {
#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((1UL<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((1UL<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK ((1UL<<CI_EMBED_ID_bits) - 1)
#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
static inline int
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
#if USE_EMBED_CI
@ -89,7 +89,7 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
return 1;
}
else {
VM_ASSERT(imemo_type_p((VALUE)ci, imemo_callinfo));
VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
return 0;
}
#else
@ -97,6 +97,17 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
#endif
}
static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
return 1;
}
else {
return 0;
}
}
static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
@ -141,7 +152,6 @@ vm_ci_kwarg(const struct rb_callinfo *ci)
}
}
#if 0 // for debug
static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
@ -153,7 +163,6 @@ vm_ci_dump(const struct rb_callinfo *ci)
rp(ci);
}
}
#endif
#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)
@ -162,12 +171,11 @@ static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
#if USE_EMBED_CI
if ((mid & ~CI_EMBED_ID_MASK) == 0 &&
(argc & ~CI_EMBED_ARGC_MASK) == 0 &&
kwarg == NULL) {
VALUE embed_ci =
1L |
((VALUE)argc << CI_EMBED_ARGC_SHFT) |
((VALUE)flag << CI_EMBED_FLAG_SHFT) |
((VALUE)mid << CI_EMBED_ID_SHFT);
@ -175,8 +183,11 @@ vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinf
return (const struct rb_callinfo *)embed_ci;
}
#endif
const bool debug = 0;
if (debug) fprintf(stderr, "%s:%d ", file, line);
// TODO: dedup
const struct rb_callinfo *ci = (const struct rb_callinfo *)
rb_imemo_new(imemo_callinfo,
(VALUE)mid,
@ -204,3 +215,209 @@ vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb
RB_DEBUG_COUNTER_INC(ci_runtime);
return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}
typedef VALUE (*vm_call_handler)(
struct rb_execution_context_struct *ec,
struct rb_control_frame_struct *cfp,
struct rb_calling_info *calling,
struct rb_call_data *cd);
// imemo_callcache
struct rb_callcache {
const VALUE flags;
/* inline cache: key */
const VALUE klass; // should not be marked, because marking it would keep
// klass from ever being freed. When klass is collected,
// cc will be cleared (cc->klass = 0) at vm_ccs_free().
/* inline cache: values */
const struct rb_callable_method_entry_struct * const cme_;
const vm_call_handler call_;
union {
const unsigned int attr_index;
const enum method_missing_reason method_missing_reason; /* used by method_missing */
} aux_;
};
#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
const struct rb_callable_method_entry_struct *cme,
vm_call_handler call)
{
const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
RB_DEBUG_COUNTER_INC(cc_new);
return cc;
}
static inline const struct rb_callcache *
vm_cc_fill(struct rb_callcache *cc,
VALUE klass,
const struct rb_callable_method_entry_struct *cme,
vm_call_handler call)
{
struct rb_callcache cc_body = {
.flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
.klass = klass,
.cme_ = cme,
.call_ = call,
};
MEMCPY(cc, &cc_body, struct rb_callcache, 1);
return cc;
}
static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc->klass == 0 ||
RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
return cc->klass == klass;
}
static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->cme_;
}
static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->call_;
}
static inline unsigned int
vm_cc_attr_index(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr_index;
}
static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.method_missing_reason;
}
static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return FL_TEST_RAW(cc, VM_CALLCACHE_UNMARKABLE) == 0;
}
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, VALUE klass)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
return 1;
}
else {
return 0;
}
}
#ifndef MJIT_HEADER
extern const struct rb_callcache *vm_empty_cc;
#else
extern const struct rb_callcache *rb_vm_empty_cc(void);
#endif
static inline const struct rb_callcache *
vm_cc_empty(void)
{
#ifndef MJIT_HEADER
return vm_empty_cc;
#else
return rb_vm_empty_cc();
#endif
}
/* callcache: mutate */
static inline void
vm_cc_cme_set(const struct rb_callcache *cc, const struct rb_callable_method_entry_struct *cme)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
VM_ASSERT(vm_cc_cme(cc) != NULL);
VM_ASSERT(vm_cc_cme(cc)->called_id == cme->called_id);
VM_ASSERT(!vm_cc_markable(cc)); // only used for vm_eval.c
*((const struct rb_callable_method_entry_struct **)&cc->cme_) = cme;
}
static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
*(vm_call_handler *)&cc->call_ = call;
}
static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
*(int *)&cc->aux_.attr_index = index;
}
static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
*(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}
static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
VM_ASSERT(cc->klass != 0); // must not have been invalidated yet
*(VALUE *)&cc->klass = 0;
RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}
/* calldata */
struct rb_call_data {
const struct rb_callinfo *ci;
const struct rb_callcache *cc;
};
struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
VALUE debug_sig;
#endif
int capa;
int len;
const struct rb_callable_method_entry_struct *cme;
struct rb_class_cc_entries_entry {
const struct rb_callinfo *ci;
const struct rb_callcache *cc;
} *entries;
};
#if VM_CHECK_MODE > 0
static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
return ccs->debug_sig == ~(VALUE)ccs;
}
#endif
// gc.c
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);


@ -253,7 +253,6 @@ struct rb_calling_info {
};
struct rb_execution_context_struct;
typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd);
#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
@ -1088,7 +1087,7 @@ typedef struct iseq_inline_cache_entry *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef struct rb_call_cache *CALL_CACHE;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;
typedef VALUE CDHASH;


@ -111,7 +111,7 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
}
if (cfp->iseq != 0) {
#define RUBY_VM_IFUNC_P(ptr) imemo_type_p((VALUE)ptr, imemo_ifunc)
#define RUBY_VM_IFUNC_P(ptr) IMEMO_TYPE_P(ptr, imemo_ifunc)
if (RUBY_VM_IFUNC_P(cfp->iseq)) {
iseq_name = "<ifunc>";
}
@ -167,7 +167,7 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
char buff[0x100];
if (me) {
if (imemo_type_p((VALUE)me, imemo_ment)) {
if (IMEMO_TYPE_P(me, imemo_ment)) {
fprintf(stderr, " me:\n");
fprintf(stderr, " called_id: %s, type: %s\n", rb_id2name(me->called_id), rb_method_type_name(me->def->type));
fprintf(stderr, " owner class: %s\n", rb_raw_obj_info(buff, 0x100, me->owner));


@ -47,7 +47,8 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
{
struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, };
const struct rb_callinfo *ci = vm_ci_new_runtime(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL);
const struct rb_call_cache cc = { 0, { 0, }, me, me->def->method_serial, vm_call_general, { 0, }, };
struct rb_callcache cc_body;
const struct rb_callcache *cc = vm_cc_fill(&cc_body, 0, me, vm_call_general);
struct rb_call_data cd = { ci, cc, };
return vm_call0_body(ec, &calling, &cd, argv);
}
@ -56,9 +57,9 @@ static VALUE
vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
{
const struct rb_callinfo *ci = cd->ci;
const struct rb_call_cache *cc = &cd->cc;
const struct rb_callcache *cc = cd->cc;
VALUE val;
const rb_callable_method_entry_t *me = cc->me;
const rb_callable_method_entry_t *me = vm_cc_cme(cc);
const rb_method_cfunc_t *cfunc = UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
int len = cfunc->argc;
VALUE recv = calling->recv;
@ -109,14 +110,14 @@ static VALUE
vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
{
const struct rb_callinfo *ci = cd->ci;
struct rb_call_cache *cc = &cd->cc;
const struct rb_callcache *cc = cd->cc;
VALUE ret;
calling->block_handler = vm_passed_block_handler(ec);
again:
switch (cc->me->def->type) {
switch (vm_cc_cme(cc)->def->type) {
case VM_METHOD_TYPE_ISEQ:
{
rb_control_frame_t *reg_cfp = ec->cfp;
@ -147,7 +148,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
}
rb_check_arity(calling->argc, 1, 1);
ret = rb_ivar_set(calling->recv, cc->me->def->body.attr.id, argv[0]);
ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
goto success;
case VM_METHOD_TYPE_IVAR:
if (calling->kw_splat &&
@ -158,7 +159,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
}
rb_check_arity(calling->argc, 0, 0);
ret = rb_attr_get(calling->recv, cc->me->def->body.attr.id);
ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
goto success;
case VM_METHOD_TYPE_BMETHOD:
ret = vm_call_bmethod_body(ec, calling, cd, argv);
@ -166,21 +167,21 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
case VM_METHOD_TYPE_ZSUPER:
case VM_METHOD_TYPE_REFINED:
{
const rb_method_type_t type = cc->me->def->type;
VALUE super_class = cc->me->defined_class;
const rb_method_type_t type = vm_cc_cme(cc)->def->type;
VALUE super_class = vm_cc_cme(cc)->defined_class;
if (type == VM_METHOD_TYPE_ZSUPER) {
super_class = RCLASS_ORIGIN(super_class);
}
else if (cc->me->def->body.refined.orig_me) {
CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
goto again;
else if (vm_cc_cme(cc)->def->body.refined.orig_me) {
vm_cc_cme_set(cc, refined_method_callable_without_refinement(vm_cc_cme(cc)));
goto again;
}
super_class = RCLASS_SUPER(super_class);
if (super_class) {
CC_SET_ME(cc, rb_callable_method_entry(super_class, vm_ci_mid(ci)));
if (cc->me) {
vm_cc_cme_set(cc, rb_callable_method_entry(super_class, vm_ci_mid(ci)));
if (vm_cc_cme(cc)) {
RUBY_VM_CHECK_INTS(ec);
goto again;
}
@ -191,7 +192,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
goto success;
}
case VM_METHOD_TYPE_ALIAS:
CC_SET_ME(cc, aliased_callable_method_entry(cc->me));
vm_cc_cme_set(cc, aliased_callable_method_entry(vm_cc_cme(cc)));
goto again;
case VM_METHOD_TYPE_MISSING:
{
@ -200,7 +201,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
argv, MISSING_NOENTRY, calling->kw_splat);
}
case VM_METHOD_TYPE_OPTIMIZED:
switch (cc->me->def->body.optimize_type) {
switch (vm_cc_cme(cc)->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
ret = send_internal(calling->argc, argv, calling->recv, calling->kw_splat ? CALL_FCALL_KW : CALL_FCALL);
goto success;
@ -212,13 +213,13 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
goto success;
}
default:
rb_bug("vm_call0: unsupported optimized method type (%d)", cc->me->def->body.optimize_type);
rb_bug("vm_call0: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimize_type);
}
break;
case VM_METHOD_TYPE_UNDEF:
break;
}
rb_bug("vm_call0: unsupported method type (%d)", cc->me->def->type);
rb_bug("vm_call0: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
return Qundef;
success:
@ -359,7 +360,7 @@ struct rescue_funcall_args {
VALUE recv;
ID mid;
rb_execution_context_t *ec;
const rb_method_entry_t *me;
const rb_callable_method_entry_t *cme;
unsigned int respond: 1;
unsigned int respond_to_missing: 1;
int argc;
@ -373,7 +374,7 @@ check_funcall_exec(VALUE v)
struct rescue_funcall_args *args = (void *)v;
return call_method_entry(args->ec, args->defined_class,
args->recv, idMethodMissing,
args->me, args->argc, args->argv, args->kw_splat);
args->cme, args->argc, args->argv, args->kw_splat);
}
static VALUE
@ -417,7 +418,7 @@ static VALUE
check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mid, int argc, const VALUE *argv, int respond, VALUE def, int kw_splat)
{
struct rescue_funcall_args args;
const rb_method_entry_t *me;
const rb_callable_method_entry_t *cme;
VALUE ret = Qundef;
ret = basic_obj_respond_to_missing(ec, klass, recv,
@ -426,8 +427,9 @@ check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mi
args.respond = respond > 0;
args.respond_to_missing = (ret != Qundef);
ret = def;
me = method_entry_get(klass, idMethodMissing, &args.defined_class);
if (me && !METHOD_ENTRY_BASIC(me)) {
cme = callable_method_entry(klass, idMethodMissing, &args.defined_class);
if (cme && !METHOD_ENTRY_BASIC(cme)) {
VALUE argbuf, *new_args = ALLOCV_N(VALUE, argbuf, argc+1);
new_args[0] = ID2SYM(mid);
@ -442,7 +444,7 @@ check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mi
ec->method_missing_reason = MISSING_NOENTRY;
args.ec = ec;
args.recv = recv;
args.me = me;
args.cme = cme;
args.mid = mid;
args.argc = argc + 1;
args.argv = new_args;

File diff suppressed because it is too large.

View file

@ -121,20 +121,13 @@ enum vm_regan_acttype {
*/
static inline void
CC_SET_FASTPATH(CALL_CACHE cc, vm_call_handler func, bool enabled)
CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
if (LIKELY(enabled)) {
cc->call = func;
vm_cc_call_set(cc, func);
}
}
static inline void
CC_SET_ME(CALL_CACHE cc, const rb_callable_method_entry_t *me)
{
cc->me = me;
cc->method_serial = me ? me->def->method_serial : 0;
}
#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
/**********************************************************/
@ -258,10 +251,10 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
can be used as a fastpath. */
static bool
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_call_cache *cc)
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
!(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED);
!(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED);
}
#endif /* RUBY_INSNHELPER_H */
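
CC_SET_FASTPATH() is now the only writer of cc->call_, going through vm_cc_call_set(). A usage sketch follows; it is illustrative only, and uses vm_call_general (the generic handler referenced in the vm_eval.c hunk above) where a real caller would install a more specialized handler.

/* sketch only: install a handler as the CC's fast-path, but only when the
 * call shape allows it. */
static void
install_fastpath_sketch(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    CC_SET_FASTPATH(cc, vm_call_general, vm_call_iseq_optimizable_p(ci, cc));
}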

View file

@ -6,25 +6,6 @@
#define METHOD_DEBUG 0
#if OPT_GLOBAL_METHOD_CACHE
#ifndef GLOBAL_METHOD_CACHE_SIZE
#define GLOBAL_METHOD_CACHE_SIZE 0x800
#endif
#define LSB_ONLY(x) ((x) & ~((x) - 1))
#define POWER_OF_2_P(x) ((x) == LSB_ONLY(x))
#if !POWER_OF_2_P(GLOBAL_METHOD_CACHE_SIZE)
# error GLOBAL_METHOD_CACHE_SIZE must be power of 2
#endif
#ifndef GLOBAL_METHOD_CACHE_MASK
#define GLOBAL_METHOD_CACHE_MASK (GLOBAL_METHOD_CACHE_SIZE-1)
#endif
#define GLOBAL_METHOD_CACHE_KEY(c,m) ((((c)>>3)^(m))&(global_method_cache.mask))
#define GLOBAL_METHOD_CACHE(c,m) (global_method_cache.entries + GLOBAL_METHOD_CACHE_KEY(c,m))
#else
#define GLOBAL_METHOD_CACHE(c,m) (rb_bug("global method cache disabled improperly"), NULL)
#endif
static int vm_redefinition_check_flag(VALUE klass);
static void rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass);
@ -37,50 +18,108 @@ static void rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VAL
#define singleton_undefined idSingleton_method_undefined
#define attached id__attached__
struct cache_entry {
rb_serial_t method_state;
rb_serial_t class_serial;
ID mid;
rb_method_entry_t* me;
VALUE defined_class;
};
#if OPT_GLOBAL_METHOD_CACHE
static struct {
unsigned int size;
unsigned int mask;
struct cache_entry *entries;
} global_method_cache = {
GLOBAL_METHOD_CACHE_SIZE,
GLOBAL_METHOD_CACHE_MASK,
};
#endif
#define ruby_running (GET_VM()->running)
/* int ruby_running = 0; */
static void
rb_class_clear_method_cache(VALUE klass, VALUE arg)
static enum rb_id_table_iterator_result
vm_ccs_dump_i(ID mid, VALUE val, void *data)
{
rb_serial_t old_serial = *(rb_serial_t *)arg;
if (RCLASS_SERIAL(klass) > old_serial) {
return;
const struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)val;
fprintf(stderr, " | %s (%d) ", rb_id2name(mid), ccs->len);
rp(ccs->cme);
for (int i=0; i<ccs->len; i++) {
fprintf(stderr, " | [%d] ", i); vm_ci_dump(ccs->entries[i].ci);
rp_m( " | ", ccs->entries[i].cc);
}
mjit_remove_class_serial(RCLASS_SERIAL(klass));
RCLASS_SERIAL(klass) = rb_next_class_serial();
return ID_TABLE_CONTINUE;
}
if (BUILTIN_TYPE(klass) == T_ICLASS) {
struct rb_id_table *table = RCLASS_CALLABLE_M_TBL(klass);
if (table) {
rb_id_table_clear(table);
}
}
else {
VM_ASSERT(RCLASS_CALLABLE_M_TBL(klass) == 0);
static void
vm_ccs_dump(VALUE klass, ID target_mid)
{
struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
if (cc_tbl) {
const struct rb_class_cc_entries *ccs;
if (target_mid) {
if (rb_id_table_lookup(cc_tbl, target_mid, (VALUE *)&ccs)) {
fprintf(stderr, " [CCTB] %p\n", cc_tbl);
vm_ccs_dump_i(target_mid, (VALUE)ccs, NULL);
}
}
else {
fprintf(stderr, " [CCTB] %p\n", cc_tbl);
rb_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
}
}
}
rb_class_foreach_subclass(klass, rb_class_clear_method_cache, arg);
static enum rb_id_table_iterator_result
vm_cme_dump_i(ID mid, VALUE val, void *data)
{
ID target_mid = (ID)data;
if (target_mid == 0 || mid == target_mid) {
rp_m(" > ", val);
}
return ID_TABLE_CONTINUE;
}
static VALUE
vm_mtbl_dump(VALUE klass, ID target_mid)
{
fprintf(stderr, "# vm_mtbl\n");
while (klass) {
rp_m(" -> ", klass);
rb_method_entry_t *me;
if (RCLASS_M_TBL(klass)) {
if (target_mid != 0) {
if (rb_id_table_lookup(RCLASS_M_TBL(klass), target_mid, (VALUE *)&me)) {
rp_m(" [MTBL] ", me);
}
}
else {
fprintf(stderr, " ## RCLASS_M_TBL (%p)\n", RCLASS_M_TBL(klass));
rb_id_table_foreach(RCLASS_M_TBL(klass), vm_cme_dump_i, NULL);
}
}
else {
fprintf(stderr, " MTBL: NULL\n");
}
if (RCLASS_CALLABLE_M_TBL(klass)) {
if (target_mid != 0) {
if (rb_id_table_lookup(RCLASS_CALLABLE_M_TBL(klass), target_mid, (VALUE *)&me)) {
rp_m(" [CM**] ", me);
}
}
else {
fprintf(stderr, " ## RCLASS_CALLABLE_M_TBL\n");
rb_id_table_foreach(RCLASS_CALLABLE_M_TBL(klass), vm_cme_dump_i, NULL);
}
}
if (RCLASS_CC_TBL(klass)) {
vm_ccs_dump(klass, target_mid);
}
klass = RCLASS_SUPER(klass);
}
return Qnil;
}
void
rb_vm_mtbl_dump(const char *msg, VALUE klass, ID target_mid)
{
fprintf(stderr, "[%s] ", msg);
vm_mtbl_dump(klass, target_mid);
}
static inline void
vm_me_invalidate_cache(rb_callable_method_entry_t *cme)
{
VM_ASSERT(IMEMO_TYPE_P(cme, imemo_ment));
VM_ASSERT(callable_method_entry_p(cme));
METHOD_ENTRY_INVALIDATED_SET(cme);
RB_DEBUG_COUNTER_INC(cc_cme_invalidate);
}
void
@ -89,33 +128,133 @@ rb_clear_constant_cache(void)
INC_GLOBAL_CONSTANT_STATE();
}
void
rb_clear_method_cache_by_class(VALUE klass)
static rb_method_entry_t *rb_method_entry_alloc(ID called_id, VALUE owner, VALUE defined_class, const rb_method_definition_t *def);
const rb_method_entry_t * rb_method_entry_clone(const rb_method_entry_t *src_me);
static const rb_callable_method_entry_t *complemented_callable_method_entry(VALUE klass, ID id);
static void
clear_method_cache_by_id_in_class(VALUE klass, ID mid)
{
if (klass && klass != Qundef) {
int global = klass == rb_cBasicObject || klass == rb_cObject || klass == rb_mKernel;
VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
RUBY_DTRACE_HOOK(METHOD_CACHE_CLEAR, (global ? "global" : rb_class2name(klass)));
if (LIKELY(RCLASS_EXT(klass)->subclasses == NULL)) {
// no subclasses
// check only current class
if (global) {
INC_GLOBAL_METHOD_STATE();
}
else {
rb_serial_t old_serial = PREV_CLASS_SERIAL();
rb_class_clear_method_cache(klass, (VALUE)&old_serial);
}
struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
struct rb_class_cc_entries *ccs;
// invalidate CCs
if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
rb_vm_ccs_free(ccs);
rb_id_table_delete(cc_tbl, mid);
RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
}
// remove from callable_m_tbl, if it exists
struct rb_id_table *cm_tbl;
if ((cm_tbl = RCLASS_CALLABLE_M_TBL(klass)) != NULL) {
rb_id_table_delete(cm_tbl, mid);
RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_callable);
}
RB_DEBUG_COUNTER_INC(cc_invalidate_leaf);
}
else {
const rb_callable_method_entry_t *cme = complemented_callable_method_entry(klass, mid);
if (klass == rb_mKernel) {
rb_subclass_entry_t *entry = RCLASS_EXT(klass)->subclasses;
if (cme) {
// if a cme was found, invalidate it so that the inline method caches holding it become invalid.
for (; entry != NULL; entry = entry->next) {
struct rb_id_table *table = RCLASS_CALLABLE_M_TBL(entry->klass);
if (table)rb_id_table_clear(table);
}
if (METHOD_ENTRY_CACHED(cme)) {
// invalidate cc by invalidating cc->cme
VALUE owner = cme->owner;
rb_callable_method_entry_t *new_cme =
(rb_callable_method_entry_t *)rb_method_entry_clone((const rb_method_entry_t *)cme);
struct rb_id_table *mtbl = RCLASS_M_TBL(RCLASS_ORIGIN(owner));
rb_id_table_insert(mtbl, mid, (VALUE)new_cme);
RB_OBJ_WRITTEN(owner, cme, new_cme);
vm_me_invalidate_cache((rb_callable_method_entry_t *)cme);
RB_DEBUG_COUNTER_INC(cc_invalidate_tree_cme);
}
// invalidate complement tbl
if (METHOD_ENTRY_COMPLEMENTED(cme)) {
VALUE defined_class = cme->defined_class;
struct rb_id_table *cm_tbl = RCLASS_CALLABLE_M_TBL(defined_class);
VM_ASSERT(cm_tbl != NULL);
int r = rb_id_table_delete(cm_tbl, mid);
VM_ASSERT(r == TRUE); (void)r;
RB_DEBUG_COUNTER_INC(cc_invalidate_tree_callable);
}
RB_DEBUG_COUNTER_INC(cc_invalidate_tree);
}
}
}
static void
clear_iclass_method_cache_by_id(VALUE iclass, VALUE d)
{
VM_ASSERT(RB_TYPE_P(iclass, T_ICLASS));
ID mid = (ID)d;
clear_method_cache_by_id_in_class(iclass, mid);
}
static void
clear_iclass_method_cache_by_id_for_refinements(VALUE klass, VALUE d)
{
if (RB_TYPE_P(klass, T_ICLASS)) {
ID mid = (ID)d;
clear_method_cache_by_id_in_class(klass, mid);
}
}
void
rb_clear_method_cache(VALUE klass_or_module, ID mid)
{
if (RB_TYPE_P(klass_or_module, T_MODULE)) {
VALUE module = klass_or_module; // alias
if (FL_TEST(module, RMODULE_IS_REFINEMENT)) {
VALUE refined_class = rb_refinement_module_get_refined_class(module);
rb_clear_method_cache(refined_class, mid);
rb_class_foreach_subclass(refined_class, clear_iclass_method_cache_by_id_for_refinements, mid);
}
rb_class_foreach_subclass(module, clear_iclass_method_cache_by_id, mid);
}
else {
clear_method_cache_by_id_in_class(klass_or_module, mid);
}
}
// gc.c
void rb_cc_table_free(VALUE klass);
static int
invalidate_all_cc(void *vstart, void *vend, size_t stride, void *data)
{
VALUE v = (VALUE)vstart;
for (; v != (VALUE)vend; v += stride) {
if (RBASIC(v)->flags) { // liveness check
if (RB_TYPE_P(v, T_CLASS) ||
RB_TYPE_P(v, T_ICLASS)) {
if (RCLASS_CC_TBL(v)) {
rb_cc_table_free(v);
}
RCLASS_CC_TBL(v) = NULL;
}
}
}
return 0; // continue the iteration
}
void
rb_clear_method_cache_all(void)
{
rb_objspace_each_objects(invalidate_all_cc, NULL);
}
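
The two entry points above differ sharply in scope. A rough usage sketch, with an assumed caller and method name, is shown below.

/* sketch only: rb_clear_method_cache() drops just the call-caches that
 * resolve to the given (class, mid), either via the leaf path or by
 * invalidating the corresponding cme, while rb_clear_method_cache_all()
 * walks the heap and frees every class's CC table. */
static void
invalidation_scope_sketch(VALUE klass)
{
    rb_clear_method_cache(klass, rb_intern("foo"));  /* narrow: one method id */
    rb_clear_method_cache_all();                     /* wide: every CC table  */
}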
VALUE
rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
{
@ -138,7 +277,7 @@ rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_me
rb_method_cfunc_t opt;
opt.func = func;
opt.argc = argc;
rb_add_method(klass, mid, VM_METHOD_TYPE_CFUNC, &opt, visi);
rb_add_method(klass, mid, VM_METHOD_TYPE_CFUNC, &opt, visi);
}
else {
rb_define_notimplement_method_id(klass, mid, visi);
@ -161,8 +300,13 @@ rb_method_definition_release(rb_method_definition_t *def, int complemented)
xfree(def);
}
else {
if (complemented) def->complemented_count--;
else if (def->alias_count > 0) def->alias_count--;
if (complemented) {
VM_ASSERT(def->complemented_count > 0);
def->complemented_count--;
}
else if (def->alias_count > 0) {
def->alias_count--;
}
if (METHOD_DEBUG) fprintf(stderr, "-%p-%s:%d->%d,%d->%d (dec)\n", (void *)def, rb_id2name(def->original_id),
alias_count, def->alias_count, complemented_count, def->complemented_count);
@ -179,20 +323,6 @@ rb_free_method_entry(const rb_method_entry_t *me)
static inline rb_method_entry_t *search_method(VALUE klass, ID id, VALUE *defined_class_ptr);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
static inline rb_method_entry_t *
lookup_method_table(VALUE klass, ID id)
{
st_data_t body;
struct rb_id_table *m_tbl = RCLASS_M_TBL(klass);
if (rb_id_table_lookup(m_tbl, id, &body)) {
return (rb_method_entry_t *) body;
}
else {
return 0;
}
}
static VALUE
(*call_cfunc_invoker_func(int argc))(VALUE recv, int argc, const VALUE *, VALUE (*func)(ANYARGS))
{
@ -406,7 +536,11 @@ const rb_method_entry_t *
rb_method_entry_clone(const rb_method_entry_t *src_me)
{
rb_method_entry_t *me = rb_method_entry_alloc(src_me->called_id, src_me->owner, src_me->defined_class,
method_definition_addref(src_me->def));
method_definition_addref(src_me->def));
if (METHOD_ENTRY_COMPLEMENTED(src_me)) {
method_definition_addref_complement(src_me->def);
}
METHOD_ENTRY_FLAGS_COPY(me, src_me);
return me;
}
@ -487,6 +621,20 @@ make_method_entry_refined(VALUE owner, rb_method_entry_t *me)
}
}
static inline rb_method_entry_t *
lookup_method_table(VALUE klass, ID id)
{
st_data_t body;
struct rb_id_table *m_tbl = RCLASS_M_TBL(klass);
if (rb_id_table_lookup(m_tbl, id, &body)) {
return (rb_method_entry_t *) body;
}
else {
return 0;
}
}
void
rb_add_refined_method_entry(VALUE refined_class, ID mid)
{
@ -494,7 +642,7 @@ rb_add_refined_method_entry(VALUE refined_class, ID mid)
if (me) {
make_method_entry_refined(refined_class, me);
rb_clear_method_cache_by_class(refined_class);
rb_clear_method_cache(refined_class, mid);
}
else {
rb_add_method(refined_class, mid, VM_METHOD_TYPE_REFINED, 0, METHOD_VISI_PUBLIC);
@ -615,7 +763,7 @@ rb_method_entry_make(VALUE klass, ID mid, VALUE defined_class, rb_method_visibil
if (def == NULL) def = rb_method_definition_create(type, original_id);
rb_method_definition_set(me, def, opts);
rb_clear_method_cache_by_class(klass);
rb_clear_method_cache(klass, mid);
/* check mid */
if (klass == rb_cObject) {
@ -737,149 +885,169 @@ rb_get_alloc_func(VALUE klass)
return 0;
}
static inline rb_method_entry_t*
search_method(VALUE klass, ID id, VALUE *defined_class_ptr)
{
rb_method_entry_t *me;
for (; klass; klass = RCLASS_SUPER(klass)) {
RB_DEBUG_COUNTER_INC(mc_search_super);
if ((me = lookup_method_table(klass, id)) != 0) break;
}
if (defined_class_ptr)
*defined_class_ptr = klass;
return me;
}
const rb_method_entry_t *
rb_method_entry_at(VALUE klass, ID id)
{
return lookup_method_table(klass, id);
}
/*
* search method entry without the method cache.
*
* if you need method entry with method cache (normal case), use
* rb_method_entry() simply.
*/
static rb_method_entry_t *
method_entry_get_without_cache(VALUE klass, ID id,
VALUE *defined_class_ptr)
static inline rb_method_entry_t*
search_method(VALUE klass, ID id, VALUE *defined_class_ptr)
{
VALUE defined_class;
rb_method_entry_t *me = search_method(klass, id, &defined_class);
rb_method_entry_t *me;
if (ruby_running) {
if (OPT_GLOBAL_METHOD_CACHE) {
struct cache_entry *ent;
ent = GLOBAL_METHOD_CACHE(klass, id);
ent->class_serial = RCLASS_SERIAL(klass);
ent->method_state = GET_GLOBAL_METHOD_STATE();
ent->defined_class = defined_class;
ent->mid = id;
RB_DEBUG_COUNTER_INC(mc_search);
if (UNDEFINED_METHOD_ENTRY_P(me)) {
me = ent->me = NULL;
}
else {
ent->me = me;
}
}
else if (UNDEFINED_METHOD_ENTRY_P(me)) {
me = NULL;
}
}
else if (UNDEFINED_METHOD_ENTRY_P(me)) {
me = NULL;
for (; klass; klass = RCLASS_SUPER(klass)) {
RB_DEBUG_COUNTER_INC(mc_search_super);
if ((me = lookup_method_table(klass, id)) != 0) {
break;
}
}
if (defined_class_ptr)
*defined_class_ptr = defined_class;
if (defined_class_ptr) *defined_class_ptr = klass;
if (me == NULL) RB_DEBUG_COUNTER_INC(mc_search_notfound);
VM_ASSERT(me == NULL || !METHOD_ENTRY_INVALIDATED(me));
return me;
}
static void
verify_method_cache(VALUE klass, ID id, VALUE defined_class, rb_method_entry_t *me)
{
if (!VM_DEBUG_VERIFY_METHOD_CACHE) return;
VALUE actual_defined_class;
rb_method_entry_t *actual_me =
method_entry_get_without_cache(klass, id, &actual_defined_class);
if (me != actual_me || defined_class != actual_defined_class) {
rb_bug("method cache verification failed");
}
}
static rb_method_entry_t *
method_entry_get(VALUE klass, ID id, VALUE *defined_class_ptr)
search_method_protect(VALUE klass, ID id, VALUE *defined_class_ptr)
{
struct cache_entry *ent;
if (!OPT_GLOBAL_METHOD_CACHE) goto nocache;
ent = GLOBAL_METHOD_CACHE(klass, id);
if (ent->method_state == GET_GLOBAL_METHOD_STATE() &&
ent->class_serial == RCLASS_SERIAL(klass) &&
ent->mid == id) {
verify_method_cache(klass, id, ent->defined_class, ent->me);
if (defined_class_ptr) *defined_class_ptr = ent->defined_class;
RB_DEBUG_COUNTER_INC(mc_global_hit);
return ent->me;
}
rb_method_entry_t *me = search_method(klass, id, defined_class_ptr);
nocache:
RB_DEBUG_COUNTER_INC(mc_global_miss);
return method_entry_get_without_cache(klass, id, defined_class_ptr);
if (!UNDEFINED_METHOD_ENTRY_P(me)) {
return me;
}
else {
return NULL;
}
}
MJIT_FUNC_EXPORTED const rb_method_entry_t *
rb_method_entry(VALUE klass, ID id)
{
return method_entry_get(klass, id, NULL);
return search_method(klass, id, NULL);
}
static const rb_callable_method_entry_t *
prepare_callable_method_entry(VALUE defined_class, ID id, const rb_method_entry_t *me)
static inline const rb_callable_method_entry_t *
prepare_callable_method_entry(VALUE defined_class, ID id, const rb_method_entry_t * const me, int create)
{
struct rb_id_table *mtbl;
const rb_callable_method_entry_t *cme;
if (me && me->defined_class == 0) {
RB_DEBUG_COUNTER_INC(mc_cme_complement);
VM_ASSERT(RB_TYPE_P(defined_class, T_ICLASS) || RB_TYPE_P(defined_class, T_MODULE));
VM_ASSERT(me->defined_class == 0);
if (me) {
if (me->defined_class == 0) {
RB_DEBUG_COUNTER_INC(mc_cme_complement);
VM_ASSERT(RB_TYPE_P(defined_class, T_ICLASS) || RB_TYPE_P(defined_class, T_MODULE));
VM_ASSERT(me->defined_class == 0);
mtbl = RCLASS_CALLABLE_M_TBL(defined_class);
mtbl = RCLASS_CALLABLE_M_TBL(defined_class);
if (mtbl && rb_id_table_lookup(mtbl, id, (VALUE *)&me)) {
RB_DEBUG_COUNTER_INC(mc_cme_complement_hit);
cme = (rb_callable_method_entry_t *)me;
VM_ASSERT(callable_method_entry_p(cme));
}
else {
if (!mtbl) {
mtbl = RCLASS_EXT(defined_class)->callable_m_tbl = rb_id_table_create(0);
}
cme = rb_method_entry_complement_defined_class(me, me->called_id, defined_class);
rb_id_table_insert(mtbl, id, (VALUE)cme);
VM_ASSERT(callable_method_entry_p(cme));
}
if (mtbl && rb_id_table_lookup(mtbl, id, (VALUE *)&cme)) {
RB_DEBUG_COUNTER_INC(mc_cme_complement_hit);
VM_ASSERT(callable_method_entry_p(cme));
VM_ASSERT(!METHOD_ENTRY_INVALIDATED(cme));
}
else if (create) {
if (!mtbl) {
mtbl = RCLASS_EXT(defined_class)->callable_m_tbl = rb_id_table_create(0);
}
cme = rb_method_entry_complement_defined_class(me, me->called_id, defined_class);
rb_id_table_insert(mtbl, id, (VALUE)cme);
VM_ASSERT(callable_method_entry_p(cme));
}
else {
return NULL;
}
}
else {
cme = (const rb_callable_method_entry_t *)me;
VM_ASSERT(callable_method_entry_p(cme));
VM_ASSERT(!METHOD_ENTRY_INVALIDATED(cme));
}
return cme;
}
else {
cme = (const rb_callable_method_entry_t *)me;
VM_ASSERT(callable_method_entry_p(cme));
return NULL;
}
}
static const rb_callable_method_entry_t *
complemented_callable_method_entry(VALUE klass, ID id)
{
VALUE defined_class;
rb_method_entry_t *me = search_method_protect(klass, id, &defined_class);
return prepare_callable_method_entry(defined_class, id, me, FALSE);
}
static const rb_callable_method_entry_t *
cached_callable_method_entry(VALUE klass, ID mid)
{
struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
struct rb_class_cc_entries *ccs;
if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
VM_ASSERT(vm_ccs_p(ccs));
if (LIKELY(!METHOD_ENTRY_INVALIDATED(ccs->cme))) {
VM_ASSERT(ccs->cme->called_id == mid);
RB_DEBUG_COUNTER_INC(ccs_found);
return ccs->cme;
}
else {
rb_vm_ccs_free(ccs);
rb_id_table_delete(cc_tbl, mid);
}
}
return NULL;
}
static void
cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_t *cme)
{
struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
struct rb_class_cc_entries *ccs;
if (!cc_tbl) {
cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
}
if (rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
VM_ASSERT(ccs->cme == cme);
}
else {
ccs = vm_ccs_create(klass, cme);
rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
}
}
static const rb_callable_method_entry_t *
callable_method_entry(VALUE klass, ID mid, VALUE *defined_class_ptr)
{
VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
const rb_callable_method_entry_t *cme = cached_callable_method_entry(klass, mid);
if (cme) {
if (defined_class_ptr != NULL) *defined_class_ptr = cme->defined_class;
}
else {
VALUE defined_class;
rb_method_entry_t *me = search_method_protect(klass, mid, &defined_class);
if (defined_class_ptr) *defined_class_ptr = defined_class;
cme = prepare_callable_method_entry(defined_class, mid, me, TRUE);
if (cme) cache_callable_method_entry(klass, mid, cme);
}
return cme;
}
MJIT_FUNC_EXPORTED const rb_callable_method_entry_t *
rb_callable_method_entry(VALUE klass, ID id)
rb_callable_method_entry(VALUE klass, ID mid)
{
VALUE defined_class;
rb_method_entry_t *me = method_entry_get(klass, id, &defined_class);
return prepare_callable_method_entry(defined_class, id, me);
return callable_method_entry(klass, mid, NULL);
}
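
callable_method_entry() first consults the per-class cache, then falls back to a full search and caches the result. Combined with vm_cc_fill() as used in the vm_eval.c hunk above, a caller can build the same kind of one-shot, unmarkable CC; the sketch below is illustrative and the helper name is an assumption.

/* sketch only: resolve a callable ME through the pCMC and wrap it in a
 * stack-local CC, mirroring rb_vm_call0() above; klass is passed as 0 to
 * vm_cc_fill(), following that usage. */
static const struct rb_callcache *
one_shot_cc_sketch(struct rb_callcache *buf, VALUE klass, ID mid)
{
    const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
    return cme ? vm_cc_fill(buf, 0, cme, vm_call_general) : NULL;
}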
static const rb_method_entry_t *resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *defined_class_ptr);
@ -887,7 +1055,7 @@ static const rb_method_entry_t *resolve_refined_method(VALUE refinements, const
static const rb_method_entry_t *
method_entry_resolve_refinement(VALUE klass, ID id, int with_refinement, VALUE *defined_class_ptr)
{
const rb_method_entry_t *me = method_entry_get(klass, id, defined_class_ptr);
const rb_method_entry_t *me = search_method_protect(klass, id, defined_class_ptr);
if (me) {
if (me->def->type == VM_METHOD_TYPE_REFINED) {
@ -916,9 +1084,15 @@ rb_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class_ptr)
MJIT_FUNC_EXPORTED const rb_callable_method_entry_t *
rb_callable_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class_ptr)
{
VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, TRUE, dcp);
return prepare_callable_method_entry(*dcp, id, me);
const rb_callable_method_entry_t *cme = callable_method_entry(klass, id, defined_class_ptr);
if (cme == NULL || cme->def->type != VM_METHOD_TYPE_REFINED) {
return cme;
}
else {
VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, TRUE, dcp);
return prepare_callable_method_entry(*dcp, id, me, TRUE);
}
}
const rb_method_entry_t *
@ -932,7 +1106,7 @@ rb_callable_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_
{
VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, FALSE, dcp);
return prepare_callable_method_entry(*dcp, id, me);
return prepare_callable_method_entry(*dcp, id, me, TRUE);
}
static const rb_method_entry_t *
@ -945,7 +1119,7 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
refinement = find_refinement(refinements, me->owner);
if (!NIL_P(refinement)) {
tmp_me = method_entry_get(refinement, me->called_id, defined_class_ptr);
tmp_me = search_method_protect(refinement, me->called_id, defined_class_ptr);
if (tmp_me && tmp_me->def->type != VM_METHOD_TYPE_REFINED) {
return tmp_me;
@ -963,7 +1137,7 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
return 0;
}
me = method_entry_get(super, me->called_id, defined_class_ptr);
me = search_method_protect(super, me->called_id, defined_class_ptr);
}
return me;
}
@ -1010,10 +1184,10 @@ remove_method(VALUE klass, ID mid)
klass, ID2SYM(mid));
}
rb_clear_method_cache(klass, mid);
rb_id_table_delete(RCLASS_M_TBL(klass), mid);
rb_vm_check_redefinition_opt_method(me, klass);
rb_clear_method_cache_by_class(klass);
if (me->def->type == VM_METHOD_TYPE_REFINED) {
rb_add_refined_method_entry(klass, mid);
@ -1069,6 +1243,7 @@ rb_export_method(VALUE klass, ID name, rb_method_visibility_t visi)
VALUE origin_class = RCLASS_ORIGIN(klass);
me = search_method(origin_class, name, &defined_class);
if (!me && RB_TYPE_P(klass, T_MODULE)) {
me = search_method(rb_cObject, name, &defined_class);
}
@ -1087,7 +1262,7 @@ rb_export_method(VALUE klass, ID name, rb_method_visibility_t visi)
if (me->def->type == VM_METHOD_TYPE_REFINED && me->def->body.refined.orig_me) {
METHOD_ENTRY_VISI_SET((rb_method_entry_t *)me->def->body.refined.orig_me, visi);
}
rb_clear_method_cache_by_class(klass);
rb_clear_method_cache(klass, name);
}
else {
rb_add_method(klass, name, VM_METHOD_TYPE_ZSUPER, 0, visi);
@ -1110,8 +1285,8 @@ rb_method_boundp(VALUE klass, ID id, int ex)
me = rb_method_entry_without_refinements(klass, id, NULL);
}
if (me != 0) {
if ((ex & ~BOUND_RESPONDS) &&
if (me != NULL) {
if ((ex & ~BOUND_RESPONDS) &&
((METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE) ||
((ex & BOUND_RESPONDS) && (METHOD_ENTRY_VISI(me) == METHOD_VISI_PROTECTED)))) {
return 0;
@ -1593,6 +1768,7 @@ rb_alias(VALUE klass, ID alias_name, ID original_name)
again:
orig_me = search_method(klass, original_name, &defined_class);
if (orig_me && orig_me->def->type == VM_METHOD_TYPE_REFINED) {
orig_me = rb_resolve_refined_method(Qnil, orig_me);
}
@ -1841,7 +2017,7 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
!me->def->body.iseq.iseqptr->body->param.flags.has_kw &&
!me->def->body.iseq.iseqptr->body->param.flags.has_kwrest) {
me->def->body.iseq.iseqptr->body->param.flags.ruby2_keywords = 1;
rb_clear_method_cache_by_class(module);
rb_clear_method_cache(module, name);
}
else {
rb_warn("Skipping set of ruby2_keywords flag for %s (method accepts keywords or method does not accept argument splat)", rb_id2name(name));
@ -1860,7 +2036,7 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
!iseq->body->param.flags.has_kw &&
!iseq->body->param.flags.has_kwrest) {
iseq->body->param.flags.ruby2_keywords = 1;
rb_clear_method_cache_by_class(module);
rb_clear_method_cache(module, name);
}
else {
rb_warn("Skipping set of ruby2_keywords flag for %s (method accepts keywords or method does not accept argument splat)", rb_id2name(name));
@ -2061,10 +2237,10 @@ rb_mod_modfunc(int argc, VALUE *argv, VALUE module)
int
rb_method_basic_definition_p(VALUE klass, ID id)
{
const rb_method_entry_t *me;
const rb_callable_method_entry_t *cme;
if (!klass) return TRUE; /* hidden object cannot be overridden */
me = rb_method_entry(klass, id);
return (me && METHOD_ENTRY_BASIC(me)) ? TRUE : FALSE;
cme = rb_callable_method_entry(klass, id);
return (cme && METHOD_ENTRY_BASIC(cme)) ? TRUE : FALSE;
}
#ifdef __GNUC__
#pragma pop_macro("rb_method_basic_definition_p")
@ -2072,10 +2248,8 @@ rb_method_basic_definition_p(VALUE klass, ID id)
static VALUE
call_method_entry(rb_execution_context_t *ec, VALUE defined_class, VALUE obj, ID id,
const rb_method_entry_t *me, int argc, const VALUE *argv, int kw_splat)
const rb_callable_method_entry_t *cme, int argc, const VALUE *argv, int kw_splat)
{
const rb_callable_method_entry_t *cme =
prepare_callable_method_entry(defined_class, id, me);
VALUE passed_block_handler = vm_passed_block_handler(ec);
VALUE result = rb_vm_call_kw(ec, obj, id, argc, argv, cme, kw_splat);
vm_passed_block_handler_set(ec, passed_block_handler);
@ -2088,13 +2262,12 @@ basic_obj_respond_to_missing(rb_execution_context_t *ec, VALUE klass, VALUE obj,
{
VALUE defined_class, args[2];
const ID rtmid = idRespond_to_missing;
const rb_method_entry_t *const me =
method_entry_get(klass, rtmid, &defined_class);
const rb_callable_method_entry_t *const cme = callable_method_entry(klass, rtmid, &defined_class);
if (!me || METHOD_ENTRY_BASIC(me)) return Qundef;
if (!cme || METHOD_ENTRY_BASIC(cme)) return Qundef;
args[0] = mid;
args[1] = priv;
return call_method_entry(ec, defined_class, obj, rtmid, me, 2, args, RB_NO_KEYWORDS);
return call_method_entry(ec, defined_class, obj, rtmid, cme, 2, args, RB_NO_KEYWORDS);
}
static inline int
@ -2120,11 +2293,10 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
{
VALUE defined_class;
const ID resid = idRespond_to;
const rb_method_entry_t *const me =
method_entry_get(klass, resid, &defined_class);
const rb_callable_method_entry_t *const cme = callable_method_entry(klass, resid, &defined_class);
if (!me) return -1;
if (METHOD_ENTRY_BASIC(me)) {
if (!cme) return -1;
if (METHOD_ENTRY_BASIC(cme)) {
return -1;
}
else {
@ -2135,7 +2307,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
args[0] = ID2SYM(id);
args[1] = Qtrue;
if (priv) {
argc = rb_method_entry_arity(me);
argc = rb_method_entry_arity((const rb_method_entry_t *)cme);
if (argc > 2) {
rb_raise(rb_eArgError,
"respond_to? must accept 1 or 2 arguments (requires %d)",
@ -2145,7 +2317,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
argc = 2;
}
else if (!NIL_P(ruby_verbose)) {
VALUE location = rb_method_entry_location(me);
VALUE location = rb_method_entry_location((const rb_method_entry_t *)cme);
rb_warn("%"PRIsVALUE"%c""respond_to?(:%"PRIsVALUE") uses"
" the deprecated method signature, which takes one parameter",
(FL_TEST(klass, FL_SINGLETON) ? obj : klass),
@ -2161,7 +2333,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
}
}
}
result = call_method_entry(ec, defined_class, obj, resid, me, argc, args, RB_NO_KEYWORDS);
result = call_method_entry(ec, defined_class, obj, resid, cme, argc, args, RB_NO_KEYWORDS);
return RTEST(result);
}
}
@ -2246,25 +2418,7 @@ obj_respond_to_missing(VALUE obj, VALUE mid, VALUE priv)
void
Init_Method(void)
{
if (!OPT_GLOBAL_METHOD_CACHE) return;
char *ptr = getenv("RUBY_GLOBAL_METHOD_CACHE_SIZE");
int val;
if (ptr != NULL && (val = atoi(ptr)) > 0) {
if ((val & (val - 1)) == 0) { /* ensure val is a power of 2 */
global_method_cache.size = val;
global_method_cache.mask = val - 1;
}
else {
fprintf(stderr, "RUBY_GLOBAL_METHOD_CACHE_SIZE was set to %d but ignored because the value is not a power of 2.\n", val);
}
}
global_method_cache.entries = (struct cache_entry *)calloc(global_method_cache.size, sizeof(struct cache_entry));
if (global_method_cache.entries == NULL) {
fprintf(stderr, "[FATAL] failed to allocate memory\n");
exit(EXIT_FAILURE);
}
//
}
void