/**********************************************************************

  debug_counter.h -

  created at: Tue Feb 21 16:51:18 2017

  Copyright (C) 2017 Koichi Sasada

**********************************************************************/

#ifndef USE_DEBUG_COUNTER
#define USE_DEBUG_COUNTER 0
#endif

#ifdef RB_DEBUG_COUNTER

// method cache (IMC: inline method cache)
RB_DEBUG_COUNTER(mc_inline_hit)              // IMC hit
RB_DEBUG_COUNTER(mc_inline_miss_klass)       // IMC miss by different class
RB_DEBUG_COUNTER(mc_inline_miss_invalidated) // IMC miss by invalidated ME
RB_DEBUG_COUNTER(mc_inline_miss_empty)       // IMC miss because the previous slot is empty
RB_DEBUG_COUNTER(mc_inline_miss_same_cc)     // IMC miss, but same CC
RB_DEBUG_COUNTER(mc_inline_miss_same_cme)    // IMC miss, but same CME
RB_DEBUG_COUNTER(mc_inline_miss_same_def)    // IMC miss, but same definition
RB_DEBUG_COUNTER(mc_inline_miss_diff)        // IMC miss, different methods

RB_DEBUG_COUNTER(mc_cme_complement)          // number of acquired complemented CMEs
RB_DEBUG_COUNTER(mc_cme_complement_hit)      // number of cache hits for complemented CMEs

RB_DEBUG_COUNTER(mc_search)                  // count of method lookups in the class tree
RB_DEBUG_COUNTER(mc_search_notfound)         // method lookups that found nothing
RB_DEBUG_COUNTER(mc_search_super)            // total number of classes traversed during lookup
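
/* Illustrative usage (not part of this header): VM internals bump these
 * counters via the RB_DEBUG_COUNTER_INC() macro defined below, e.g. an
 * inline-method-cache hit on the call path is recorded roughly as
 *
 *     RB_DEBUG_COUNTER_INC(mc_inline_hit);
 *
 * The real call sites live in the VM sources (e.g. vm_insnhelper.c);
 * the line above is only a sketch of the pattern.
 */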

// callinfo
RB_DEBUG_COUNTER(ci_packed)  // number of packed CIs
RB_DEBUG_COUNTER(ci_kw)      // non-packed CIs with keywords
RB_DEBUG_COUNTER(ci_nokw)    // non-packed CIs without keywords
RB_DEBUG_COUNTER(ci_runtime) // temporary CIs created at runtime

// callcache
RB_DEBUG_COUNTER(cc_new)                      // number of CCs
RB_DEBUG_COUNTER(cc_temp)                     // dummy CCs (stack-allocated)
RB_DEBUG_COUNTER(cc_found_in_ccs)             // count of CC lookup hits in CCS
RB_DEBUG_COUNTER(cc_not_found_in_ccs)         // count of CC lookup misses in CCS

RB_DEBUG_COUNTER(cc_ent_invalidate)           // count of invalidated CCs (cc->klass = 0)
RB_DEBUG_COUNTER(cc_cme_invalidate)           // count of invalidated CMEs

RB_DEBUG_COUNTER(cc_invalidate_leaf)          // count of klass invalidations when klass has no subclasses
RB_DEBUG_COUNTER(cc_invalidate_leaf_ccs)      // corresponding CCS
RB_DEBUG_COUNTER(cc_invalidate_leaf_callable) // complemented cache (no subclasses)
RB_DEBUG_COUNTER(cc_invalidate_tree)          // count of klass invalidations when klass has subclasses
RB_DEBUG_COUNTER(cc_invalidate_tree_cme)      // CMEs found in this class or its superclasses
RB_DEBUG_COUNTER(cc_invalidate_tree_callable) // complemented cache (subclasses)
RB_DEBUG_COUNTER(cc_invalidate_negative)      // count of invalidated negative caches

RB_DEBUG_COUNTER(ccs_free)      // count of free'd CCSs
RB_DEBUG_COUNTER(ccs_maxlen)    // maximum length of a CCS
RB_DEBUG_COUNTER(ccs_found)     // count of CCSs found on method lookup
RB_DEBUG_COUNTER(ccs_not_found) // count of CCSs not found on method lookup
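
/* Illustrative usage (not part of this header): a high-water counter such as
 * ccs_maxlen is meant to be updated with RB_DEBUG_COUNTER_SETMAX(), defined
 * below. A sketch of the pattern (the variable `len` is hypothetical; see the
 * method-cache code for the real call site):
 *
 *     RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, len);
 */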

// vm_eval.c
RB_DEBUG_COUNTER(call0_public)
RB_DEBUG_COUNTER(call0_other)

// iseq
RB_DEBUG_COUNTER(iseq_num)    // total number of created iseqs
RB_DEBUG_COUNTER(iseq_cd_num) // total number of created cds (call_data)

/*
 * call cache fastpath usage
 */
RB_DEBUG_COUNTER(ccf_general)
RB_DEBUG_COUNTER(ccf_iseq_setup)
RB_DEBUG_COUNTER(ccf_iseq_setup_0start)
RB_DEBUG_COUNTER(ccf_iseq_setup_tailcall_0start)
RB_DEBUG_COUNTER(ccf_iseq_fix) /* several functions created with tool/mk_call_iseq_optimized.rb */
RB_DEBUG_COUNTER(ccf_iseq_opt) /* has_opt == TRUE (has optional parameters), but other flags are FALSE */
RB_DEBUG_COUNTER(ccf_iseq_kw1) /* vm_call_iseq_setup_kwparm_kwarg() */
RB_DEBUG_COUNTER(ccf_iseq_kw2) /* vm_call_iseq_setup_kwparm_nokwarg() */
RB_DEBUG_COUNTER(ccf_cfunc)
RB_DEBUG_COUNTER(ccf_cfunc_with_frame)
RB_DEBUG_COUNTER(ccf_ivar)     /* attr_reader */
RB_DEBUG_COUNTER(ccf_attrset)  /* attr_writer */
RB_DEBUG_COUNTER(ccf_method_missing)
RB_DEBUG_COUNTER(ccf_zsuper)
RB_DEBUG_COUNTER(ccf_bmethod)
RB_DEBUG_COUNTER(ccf_opt_send)
RB_DEBUG_COUNTER(ccf_opt_call)
RB_DEBUG_COUNTER(ccf_opt_block_call)
RB_DEBUG_COUNTER(ccf_super_method)

/*
 * control frame push counts.
 *
 * * frame_push: frame push counts.
 * * frame_push_*: frame push counts per frame type.
 * * frame_R2R: Ruby frame to Ruby frame
 * * frame_R2C: Ruby frame to C frame
 * * frame_C2C: C frame to C frame
 * * frame_C2R: C frame to Ruby frame
 */
RB_DEBUG_COUNTER(frame_push)
RB_DEBUG_COUNTER(frame_push_method)
RB_DEBUG_COUNTER(frame_push_block)
RB_DEBUG_COUNTER(frame_push_class)
RB_DEBUG_COUNTER(frame_push_top)
RB_DEBUG_COUNTER(frame_push_cfunc)
RB_DEBUG_COUNTER(frame_push_ifunc)
RB_DEBUG_COUNTER(frame_push_eval)
RB_DEBUG_COUNTER(frame_push_rescue)
RB_DEBUG_COUNTER(frame_push_dummy)

RB_DEBUG_COUNTER(frame_R2R)
RB_DEBUG_COUNTER(frame_R2C)
RB_DEBUG_COUNTER(frame_C2C)
RB_DEBUG_COUNTER(frame_C2R)
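
/* A sketch of how the *2* counters above are classified (illustrative only;
 * the real bookkeeping is in the VM's frame-push path, and the variable
 * names below are hypothetical; VM_FRAME_RUBYFRAME_P() is the frame
 * predicate from vm_core.h):
 *
 *     int curr_ruby = VM_FRAME_RUBYFRAME_P(new_cfp);
 *     int prev_ruby = VM_FRAME_RUBYFRAME_P(prev_cfp);
 *     if      ( prev_ruby &&  curr_ruby) RB_DEBUG_COUNTER_INC(frame_R2R);
 *     else if ( prev_ruby && !curr_ruby) RB_DEBUG_COUNTER_INC(frame_R2C);
 *     else if (!prev_ruby && !curr_ruby) RB_DEBUG_COUNTER_INC(frame_C2C);
 *     else                               RB_DEBUG_COUNTER_INC(frame_C2R);
 */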

/* instance variable counts
 *
 * * ivar_get_ic_hit/miss: ivar_get inline cache (ic) hit/miss counts (VM insn)
 * * ivar_get_ic_miss_serial: ivar_get ic misses caused by a serial mismatch (VM insn)
 * * ivar_get_ic_miss_unset: ... caused by an unset ivar (VM insn)
 * * ivar_get_ic_miss_noobject: ... caused by a non-T_OBJECT receiver (VM insn)
 * * ivar_set_...: same counts for ivar_set (VM insn)
 * * ivar_get/set_base: call counts of rb_ivar_get/set(), reached either
 *   because of (1) an ic miss or (2) a direct call from a C extension.
 */
RB_DEBUG_COUNTER(ivar_get_ic_hit)
RB_DEBUG_COUNTER(ivar_get_ic_miss)
RB_DEBUG_COUNTER(ivar_get_ic_miss_serial)
RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_get_ic_miss_noobject)
RB_DEBUG_COUNTER(ivar_set_ic_hit)
RB_DEBUG_COUNTER(ivar_set_ic_miss)
RB_DEBUG_COUNTER(ivar_set_ic_miss_serial)
RB_DEBUG_COUNTER(ivar_set_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_set_ic_miss_iv_hit)
RB_DEBUG_COUNTER(ivar_set_ic_miss_noobject)
RB_DEBUG_COUNTER(ivar_get_base)
RB_DEBUG_COUNTER(ivar_set_base)

/* local variable counts
 *
 * * lvar_get: total lvar get counts (VM insn)
 * * lvar_get_dynamic: lvar get counts when accessing an upper env (VM insn)
 * * lvar_set*: same as "get"
 * * lvar_set_slowpath: counts of vm_env_write_slowpath() calls
 */
RB_DEBUG_COUNTER(lvar_get)
RB_DEBUG_COUNTER(lvar_get_dynamic)
RB_DEBUG_COUNTER(lvar_set)
RB_DEBUG_COUNTER(lvar_set_dynamic)
RB_DEBUG_COUNTER(lvar_set_slowpath)

/* GC counts:
 *
 * * count: simple count
 * * _minor: minor gc
 * * _major: major gc
 * * other suffixes correspond to last_gc_info or
 *   gc_profile_record_flag in gc.c.
 */
RB_DEBUG_COUNTER(gc_count)
RB_DEBUG_COUNTER(gc_minor_newobj)
RB_DEBUG_COUNTER(gc_minor_malloc)
RB_DEBUG_COUNTER(gc_minor_method)
RB_DEBUG_COUNTER(gc_minor_capi)
RB_DEBUG_COUNTER(gc_minor_stress)
RB_DEBUG_COUNTER(gc_major_nofree)
RB_DEBUG_COUNTER(gc_major_oldgen)
RB_DEBUG_COUNTER(gc_major_shady)
RB_DEBUG_COUNTER(gc_major_force)
RB_DEBUG_COUNTER(gc_major_oldmalloc)

RB_DEBUG_COUNTER(gc_enter_start)
RB_DEBUG_COUNTER(gc_enter_mark_continue)
RB_DEBUG_COUNTER(gc_enter_sweep_continue)
RB_DEBUG_COUNTER(gc_enter_rest)
RB_DEBUG_COUNTER(gc_enter_finalizer)

RB_DEBUG_COUNTER(gc_isptr_trial)
RB_DEBUG_COUNTER(gc_isptr_range)
RB_DEBUG_COUNTER(gc_isptr_align)
RB_DEBUG_COUNTER(gc_isptr_maybe)

/* object allocation counts:
 *
 * * obj_newobj: newobj counts
 * * obj_newobj_slowpath: newobj with slowpath counts
 * * obj_newobj_wb_unprotected: newobj for wb_unprotected.
 * * obj_free: obj_free() counts
 * * obj_promote: promoted counts (oldgen)
 * * obj_wb_unprotect: wb unprotect counts
 *
 * * obj_[type]_[attr]: *free'ed counts* for each type.
 *                      Note that these are not allocation counts.
 * * [type]
 *   * _obj: T_OBJECT
 *   * _str: T_STRING
 *   * _ary: T_ARRAY
 *   * _xxx: T_XXX (hash, struct, ...)
 *
 * * [attr]
 *   * _ptr: R?? is not embedded.
 *   * _embed: R?? is embedded.
 *   * _transient: R?? uses the transient heap.
 *   * type specific attrs.
 *     * str_shared: str is shared.
 *     * str_nofree: nofree
 *     * str_fstr: fstr
 *     * hash_empty: hash is empty
 *     * hash_1_4: has 1 to 4 entries
 *     * hash_5_8: has 5 to 8 entries
 *     * hash_g8: has more than 8 entries (n>8)
 *     * match_under4: has fewer than 4 oniguruma regions allocated
 *     * match_ge4: has n regions allocated (4<=n<8)
 *     * match_ge8: has n regions allocated (8<=n)
 *     * data_empty: T_DATA with no memory to free.
 *     * data_xfree: free'ed by xfree().
 *     * data_imm_free: free'ed immediately.
 *     * data_zombie: free'ed via a zombie.
 *     * imemo_*: T_IMEMO with each type.
 */
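
/* For example, under this naming scheme obj_str_embed below counts freed
 * T_STRING objects whose contents were embedded in the object slot, while
 * obj_str_ptr counts freed strings that carried a separately allocated
 * buffer.
 */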

RB_DEBUG_COUNTER(obj_newobj)
RB_DEBUG_COUNTER(obj_newobj_slowpath)
RB_DEBUG_COUNTER(obj_newobj_wb_unprotected)
RB_DEBUG_COUNTER(obj_free)
RB_DEBUG_COUNTER(obj_promote)
RB_DEBUG_COUNTER(obj_wb_unprotect)

RB_DEBUG_COUNTER(obj_obj_embed)
RB_DEBUG_COUNTER(obj_obj_transient)
RB_DEBUG_COUNTER(obj_obj_ptr)

RB_DEBUG_COUNTER(obj_str_ptr)
RB_DEBUG_COUNTER(obj_str_embed)
RB_DEBUG_COUNTER(obj_str_shared)
RB_DEBUG_COUNTER(obj_str_nofree)
RB_DEBUG_COUNTER(obj_str_fstr)

RB_DEBUG_COUNTER(obj_ary_embed)
RB_DEBUG_COUNTER(obj_ary_transient)
RB_DEBUG_COUNTER(obj_ary_ptr)
RB_DEBUG_COUNTER(obj_ary_extracapa)
/*
 * ary_shared_create: shared arrays created by Array#dup and the like.
 * ary_shared: arrays that finished their life while shared.
 * ary_shared_root_occupied: shared roots that have only 1 refcnt.
 * The number (ary_shared - ary_shared_root_occupied) is the meaningful one.
 */
RB_DEBUG_COUNTER(obj_ary_shared_create)
RB_DEBUG_COUNTER(obj_ary_shared)
RB_DEBUG_COUNTER(obj_ary_shared_root_occupied)

RB_DEBUG_COUNTER(obj_hash_empty)
RB_DEBUG_COUNTER(obj_hash_1)
RB_DEBUG_COUNTER(obj_hash_2)
RB_DEBUG_COUNTER(obj_hash_3)
RB_DEBUG_COUNTER(obj_hash_4)
RB_DEBUG_COUNTER(obj_hash_5_8)
RB_DEBUG_COUNTER(obj_hash_g8)

RB_DEBUG_COUNTER(obj_hash_null)
RB_DEBUG_COUNTER(obj_hash_ar)
RB_DEBUG_COUNTER(obj_hash_st)
RB_DEBUG_COUNTER(obj_hash_transient)
RB_DEBUG_COUNTER(obj_hash_force_convert)

RB_DEBUG_COUNTER(obj_struct_embed)
RB_DEBUG_COUNTER(obj_struct_transient)
RB_DEBUG_COUNTER(obj_struct_ptr)

RB_DEBUG_COUNTER(obj_data_empty)
RB_DEBUG_COUNTER(obj_data_xfree)
RB_DEBUG_COUNTER(obj_data_imm_free)
RB_DEBUG_COUNTER(obj_data_zombie)

RB_DEBUG_COUNTER(obj_match_under4)
RB_DEBUG_COUNTER(obj_match_ge4)
RB_DEBUG_COUNTER(obj_match_ge8)
RB_DEBUG_COUNTER(obj_match_ptr)

RB_DEBUG_COUNTER(obj_iclass_ptr)
RB_DEBUG_COUNTER(obj_class_ptr)
RB_DEBUG_COUNTER(obj_module_ptr)

RB_DEBUG_COUNTER(obj_bignum_ptr)
RB_DEBUG_COUNTER(obj_bignum_embed)
RB_DEBUG_COUNTER(obj_float)
RB_DEBUG_COUNTER(obj_complex)
RB_DEBUG_COUNTER(obj_rational)

RB_DEBUG_COUNTER(obj_regexp_ptr)
RB_DEBUG_COUNTER(obj_file_ptr)
RB_DEBUG_COUNTER(obj_symbol)

RB_DEBUG_COUNTER(obj_imemo_ment)
RB_DEBUG_COUNTER(obj_imemo_iseq)
RB_DEBUG_COUNTER(obj_imemo_env)
RB_DEBUG_COUNTER(obj_imemo_tmpbuf)
RB_DEBUG_COUNTER(obj_imemo_ast)
RB_DEBUG_COUNTER(obj_imemo_cref)
RB_DEBUG_COUNTER(obj_imemo_svar)
RB_DEBUG_COUNTER(obj_imemo_throw_data)
RB_DEBUG_COUNTER(obj_imemo_ifunc)
RB_DEBUG_COUNTER(obj_imemo_memo)
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
RB_DEBUG_COUNTER(obj_imemo_callinfo)
RB_DEBUG_COUNTER(obj_imemo_callcache)
RB_DEBUG_COUNTER(obj_imemo_constcache)

/* ar_table */
RB_DEBUG_COUNTER(artable_hint_hit)
RB_DEBUG_COUNTER(artable_hint_miss)
RB_DEBUG_COUNTER(artable_hint_notfound)

/* heap function counts
 *
 * * heap_xmalloc/realloc/xfree: call counts
 */
RB_DEBUG_COUNTER(heap_xmalloc)
RB_DEBUG_COUNTER(heap_xrealloc)
RB_DEBUG_COUNTER(heap_xfree)

/* transient_heap */
RB_DEBUG_COUNTER(theap_alloc)
RB_DEBUG_COUNTER(theap_alloc_fail)
RB_DEBUG_COUNTER(theap_evacuate)

// VM sync
RB_DEBUG_COUNTER(vm_sync_lock)
RB_DEBUG_COUNTER(vm_sync_lock_enter)
RB_DEBUG_COUNTER(vm_sync_lock_enter_nb)
RB_DEBUG_COUNTER(vm_sync_lock_enter_cr)
RB_DEBUG_COUNTER(vm_sync_barrier)

/* mjit_exec() counts */
RB_DEBUG_COUNTER(mjit_exec)
RB_DEBUG_COUNTER(mjit_exec_not_added)
RB_DEBUG_COUNTER(mjit_exec_not_ready)
RB_DEBUG_COUNTER(mjit_exec_not_compiled)
RB_DEBUG_COUNTER(mjit_exec_call_func)

/* MJIT enqueue / unload */
RB_DEBUG_COUNTER(mjit_add_iseq_to_process)
RB_DEBUG_COUNTER(mjit_unload_units)

/* MJIT <-> VM frame push counts */
RB_DEBUG_COUNTER(mjit_frame_VM2VM)
RB_DEBUG_COUNTER(mjit_frame_VM2JT)
RB_DEBUG_COUNTER(mjit_frame_JT2JT)
RB_DEBUG_COUNTER(mjit_frame_JT2VM)

/* MJIT cancel counters */
RB_DEBUG_COUNTER(mjit_cancel)
RB_DEBUG_COUNTER(mjit_cancel_ivar_inline)
RB_DEBUG_COUNTER(mjit_cancel_exivar_inline)
RB_DEBUG_COUNTER(mjit_cancel_send_inline)
RB_DEBUG_COUNTER(mjit_cancel_opt_insn) /* CALL_SIMPLE_METHOD */
RB_DEBUG_COUNTER(mjit_cancel_invalidate_all)
RB_DEBUG_COUNTER(mjit_cancel_leave)

/* rb_mjit_unit_list length */
RB_DEBUG_COUNTER(mjit_length_unit_queue)
RB_DEBUG_COUNTER(mjit_length_active_units)
RB_DEBUG_COUNTER(mjit_length_compact_units)
RB_DEBUG_COUNTER(mjit_length_stale_units)

/* Other MJIT counters */
RB_DEBUG_COUNTER(mjit_compile_failures)

/* load (not implemented yet) */
/*
RB_DEBUG_COUNTER(load_files)
RB_DEBUG_COUNTER(load_path_is_not_realpath)
*/
#endif

#ifndef RUBY_DEBUG_COUNTER_H
#define RUBY_DEBUG_COUNTER_H 1

#include "ruby/internal/config.h"
#include <stddef.h>             /* for size_t */
#include "ruby/ruby.h"          /* for VALUE */

#if !defined(__GNUC__) && USE_DEBUG_COUNTER
#error "USE_DEBUG_COUNTER is not supported by compilers other than __GNUC__"
#endif

enum rb_debug_counter_type {
#define RB_DEBUG_COUNTER(name) RB_DEBUG_COUNTER_##name,
#include __FILE__
    RB_DEBUG_COUNTER_MAX
#undef RB_DEBUG_COUNTER
};
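
/* The block above is an X-macro: the file includes itself with
 * RB_DEBUG_COUNTER() temporarily defined, so every counter declared in the
 * RB_DEBUG_COUNTER section becomes an enum member. Conceptually it expands
 * to something like (illustrative only):
 *
 *     enum rb_debug_counter_type {
 *         RB_DEBUG_COUNTER_mc_inline_hit,
 *         RB_DEBUG_COUNTER_mc_inline_miss_klass,
 *         ...
 *         RB_DEBUG_COUNTER_MAX
 *     };
 *
 * The companion debug_counter.c reuses the same trick to build the table of
 * counter names.
 */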

#if USE_DEBUG_COUNTER
extern size_t rb_debug_counter[];
RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor;
RUBY_EXTERN void rb_debug_counter_add_atomic(enum rb_debug_counter_type type, int add);

inline static int
rb_debug_counter_add(enum rb_debug_counter_type type, int add, int cond)
{
    if (cond) {
        if (ruby_single_main_ractor != NULL) {
            rb_debug_counter[(int)type] += add;
        }
        else {
            rb_debug_counter_add_atomic(type, add);
        }
    }
    return cond;
}
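
/* Note on the function above: while only the single main Ractor is running,
 * counters are bumped with a plain, unsynchronized add; once multiple
 * Ractors exist, updates go through rb_debug_counter_add_atomic(). The
 * function returns `cond` unchanged, so it can sit inside a conditional,
 * e.g. (illustrative only, `found` is a hypothetical flag):
 *
 *     if (rb_debug_counter_add(RB_DEBUG_COUNTER_ccs_found, 1, found)) {
 *         // reached only when `found` is true; the counter was bumped
 *     }
 */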

inline static int
rb_debug_counter_max(enum rb_debug_counter_type type, unsigned int num)
{
    // TODO: sync
    if (rb_debug_counter[(int)type] < num) {
        rb_debug_counter[(int)type] = num;
        return 1;
    }
    else {
        return 0;
    }
}

VALUE rb_debug_counter_reset(VALUE klass);
VALUE rb_debug_counter_show(VALUE klass);

#define RB_DEBUG_COUNTER_INC(type)              rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, 1)
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !(cond)))
#define RB_DEBUG_COUNTER_INC_IF(type, cond)     rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !!(cond))
#define RB_DEBUG_COUNTER_ADD(type, num)         rb_debug_counter_add(RB_DEBUG_COUNTER_##type, (num), 1)
#define RB_DEBUG_COUNTER_SETMAX(type, num)      rb_debug_counter_max(RB_DEBUG_COUNTER_##type, (unsigned int)(num))

#else
#define RB_DEBUG_COUNTER_INC(type)              ((void)0)
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!!(cond))
#define RB_DEBUG_COUNTER_INC_IF(type, cond)     (!!(cond))
#define RB_DEBUG_COUNTER_ADD(type, num)         ((void)0)
#define RB_DEBUG_COUNTER_SETMAX(type, num)      0
#endif
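
/* Macro usage sketch (illustrative only; `cached_klass`, `klass`, and
 * `depth` are hypothetical variables, not part of this API):
 *
 *     RB_DEBUG_COUNTER_INC(frame_push);              // unconditional bump
 *     RB_DEBUG_COUNTER_ADD(mc_search_super, depth);  // bump by `depth`
 *
 *     // bump the miss counter only when the condition is false; the
 *     // expression still evaluates to the condition, so it can guard
 *     // the fast path:
 *     if (RB_DEBUG_COUNTER_INC_UNLESS(mc_inline_miss_klass, cached_klass == klass)) {
 *         // fast path (cache hit)
 *     }
 *
 * When USE_DEBUG_COUNTER is 0, the macros compile away (or reduce to the
 * bare condition), so they may be left in hot paths.
 */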

void rb_debug_counter_show_results(const char *msg);

RUBY_SYMBOL_EXPORT_BEGIN

size_t ruby_debug_counter_get(const char **names_ptr, size_t *counters_ptr);
void ruby_debug_counter_reset(void);
void ruby_debug_counter_show_at_exit(int enable);

RUBY_SYMBOL_EXPORT_END
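
/* The exported helpers above are mainly for tools and tests. A minimal
 * usage sketch (assuming, as the names suggest, that
 * ruby_debug_counter_show_at_exit() toggles dumping the counter table at
 * process exit and ruby_debug_counter_get() copies names/values into
 * caller-provided arrays and returns the number of counters; see
 * debug_counter.c for the authoritative behavior):
 *
 *     ruby_debug_counter_show_at_exit(1);   // dump all counters at exit
 */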

#endif /* RUBY_DEBUG_COUNTER_H */