do not disable `trace_` prefix insns.

* vm.c: introduce `ruby_vm_event_enabled_flags`, which accumulates every
  event flag that has been enabled so far (the idea is sketched below,
  after this list).

* vm_trace.c: do not turn off `trace_` prefix instructions, because the
  cost of turning them back on matters when a program enables and
  disables tracing repeatedly.

* iseq.c (finish_iseq_build): respect `ruby_vm_event_enabled_flags`.

* vm_insnhelper.c (vm_trace): check `ruby_vm_event_flags` first and drop
  the lazy trace-off technique (traces are no longer disabled on the fly).
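
For illustration, the scheme can be modeled as a tiny standalone C sketch
(illustrative only; `current_flags`, `enabled_flags`, and `model_set_events`
are hypothetical stand-ins for `ruby_vm_event_flags`,
`ruby_vm_event_enabled_flags`, and `update_global_event_hook`):

    /* Sketch: sticky event flags; not the actual VM code. */
    typedef unsigned int event_flag_t;

    static event_flag_t current_flags;  /* events requested right now */
    static event_flag_t enabled_flags;  /* every event ever requested;
                                         * grows monotonically, never
                                         * cleared */
    static int rewrites;                /* counts expensive full rewrites */

    static void
    model_set_events(event_flag_t events)
    {
        if (events & ~enabled_flags) {
            /* only a never-before-seen event pays the rewrite cost */
            rewrites++;
        }
        current_flags  = events;
        enabled_flags |= events;
    }

With this scheme, enabling, disabling, and re-enabling the same event set
pays the rewrite cost once; previously each re-enable could rewrite all
ISeqs.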


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61122 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
ko1 2017-12-11 19:17:25 +00:00
Parent 9e9498c227
Commit 975e3a19a7
5 changed files with 63 additions and 48 deletions

iseq.c

@@ -361,8 +361,8 @@ finish_iseq_build(rb_iseq_t *iseq)
     }
 
     iseq->aux.trace_events = 0;
-    if (ruby_vm_event_flags & ISEQ_TRACE_EVENTS) {
-        rb_iseq_trace_set(iseq, ruby_vm_event_flags & ISEQ_TRACE_EVENTS);
+    if (ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS) {
+        rb_iseq_trace_set(iseq, ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS);
     }
     return Qtrue;
 }
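
The hunk above stamps freshly compiled ISeqs with the same sticky set, so
they match the instrumentation of ISeqs that were already rewritten. A
minimal model of the rule (the `model_` names are hypothetical, not the
real API):

    struct model_iseq {
        unsigned int trace_events;
    };

    /* stands in for ruby_vm_event_enabled_flags */
    static unsigned int model_enabled_flags;

    /* a new ISeq is instrumented for every event ever enabled, so a
     * later re-enable needs no per-ISeq rewrite */
    static void
    model_finish_build(struct model_iseq *iseq)
    {
        iseq->trace_events = model_enabled_flags;
    }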

vm.c

@@ -320,6 +320,7 @@ VALUE ruby_vm_const_missing_count = 0;
 rb_vm_t *ruby_current_vm_ptr = NULL;
 rb_execution_context_t *ruby_current_execution_context_ptr = NULL;
 rb_event_flag_t ruby_vm_event_flags;
+rb_event_flag_t ruby_vm_event_enabled_flags;
 rb_serial_t ruby_vm_global_method_state = 1;
 rb_serial_t ruby_vm_global_constant_state = 1;
 rb_serial_t ruby_vm_class_serial = 1;

vm_core.h

@@ -1591,6 +1591,7 @@ RUBY_SYMBOL_EXPORT_BEGIN
 
 extern rb_vm_t *ruby_current_vm_ptr;
 extern rb_execution_context_t *ruby_current_execution_context_ptr;
 extern rb_event_flag_t ruby_vm_event_flags;
+extern rb_event_flag_t ruby_vm_event_enabled_flags;
 
 RUBY_SYMBOL_EXPORT_END

vm_insnhelper.c

@@ -3727,56 +3727,68 @@ NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *re
 static void
 vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc)
 {
-    const rb_iseq_t *iseq = reg_cfp->iseq;
-    size_t pos = pc - iseq->body->iseq_encoded;
     rb_event_flag_t vm_event_flags = ruby_vm_event_flags;
-    rb_event_flag_t events = rb_iseq_event_flags(iseq, pos);
-    rb_event_flag_t event;
 
-    if ((events & vm_event_flags) == 0) {
-        /* disable trace */
-        rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
+    if (vm_event_flags == 0) {
         return;
     }
+    else {
+        const rb_iseq_t *iseq = reg_cfp->iseq;
+        size_t pos = pc - iseq->body->iseq_encoded;
+        rb_event_flag_t events = rb_iseq_event_flags(iseq, pos);
+        rb_event_flag_t event;
 
-    if (0) {
-        fprintf(stderr, "vm_trace>>%4d (%4x) - %s:%d %s\n",
-                (int)pos,
-                (int)events,
-                RSTRING_PTR(rb_iseq_path(iseq)),
-                (int)rb_iseq_line_no(iseq, pos),
-                RSTRING_PTR(rb_iseq_label(iseq)));
-    }
+        if (ec->trace_arg != NULL) return;
 
-    if (ec->trace_arg != NULL) return;
+        if ((events & vm_event_flags) == 0) {
+#if 0
+            /* disable trace */
+            rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
+#else
+            /* do not disable trace because of performance problem
+             * (re-enable overhead)
+             */
+#endif
+            return;
+        }
 
-    VM_ASSERT(reg_cfp->pc == pc);
-    VM_ASSERT(events != 0);
-    VM_ASSERT(vm_event_flags & events);
+        if (0) {
+            fprintf(stderr, "vm_trace>>%4d (%4x) - %s:%d %s\n",
+                    (int)pos,
+                    (int)events,
+                    RSTRING_PTR(rb_iseq_path(iseq)),
+                    (int)rb_iseq_line_no(iseq, pos),
+                    RSTRING_PTR(rb_iseq_label(iseq)));
+        }
 
-    /* increment PC because source line is calculated with PC-1 */
-    if (event = (events & (RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL))) {
-        VM_ASSERT(event == RUBY_EVENT_CLASS ||
-                  event == RUBY_EVENT_CALL ||
-                  event == RUBY_EVENT_B_CALL);
-        reg_cfp->pc++;
-        vm_dtrace(event, ec);
-        EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, Qundef);
-        reg_cfp->pc--;
-    }
-    if (events & RUBY_EVENT_LINE) {
-        reg_cfp->pc++;
-        vm_dtrace(RUBY_EVENT_LINE, ec);
-        EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, GET_SELF(), 0, 0, 0, Qundef);
-        reg_cfp->pc--;
-    }
-    if (event = (events & (RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN))) {
-        VM_ASSERT(event == RUBY_EVENT_END ||
-                  event == RUBY_EVENT_RETURN ||
-                  event == RUBY_EVENT_B_RETURN);
-        reg_cfp->pc++;
-        vm_dtrace(RUBY_EVENT_LINE, ec);
-        EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, TOPN(0));
-        reg_cfp->pc--;
+        VM_ASSERT(reg_cfp->pc == pc);
+        VM_ASSERT(events != 0);
+        VM_ASSERT(vm_event_flags & events);
+
+        /* increment PC because source line is calculated with PC-1 */
+        if (event = (events & (RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL))) {
+            VM_ASSERT(event == RUBY_EVENT_CLASS ||
+                      event == RUBY_EVENT_CALL ||
+                      event == RUBY_EVENT_B_CALL);
+            reg_cfp->pc++;
+            vm_dtrace(event, ec);
+            EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, Qundef);
+            reg_cfp->pc--;
+        }
+        if (events & RUBY_EVENT_LINE) {
+            reg_cfp->pc++;
+            vm_dtrace(RUBY_EVENT_LINE, ec);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, GET_SELF(), 0, 0, 0, Qundef);
+            reg_cfp->pc--;
+        }
+        if (event = (events & (RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN))) {
+            VM_ASSERT(event == RUBY_EVENT_END ||
+                      event == RUBY_EVENT_RETURN ||
+                      event == RUBY_EVENT_B_RETURN);
+            reg_cfp->pc++;
+            vm_dtrace(RUBY_EVENT_LINE, ec);
+            EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, TOPN(0));
+            reg_cfp->pc--;
+        }
     }
 }
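
In short: the old code rewrote the whole ISeq back to untraced
instructions whenever a `trace_`-prefixed instruction fired at a site
with no relevant events (lazy trace-off); the new code just returns,
trading a cheap per-hit check for the avoided rewrite. A reduced sketch
of the resulting dispatch (illustrative names and types, simplified from
the diff above):

    static void
    model_vm_trace(unsigned int vm_event_flags, unsigned int insn_events)
    {
        if (vm_event_flags == 0) {
            return;                 /* tracing globally off */
        }
        if ((insn_events & vm_event_flags) == 0) {
            return;                 /* nothing relevant at this site;
                                     * the ISeq is NOT de-instrumented */
        }
        /* ... fire the matching event hooks ... */
    }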

vm_trace.c

@@ -65,14 +65,15 @@
 static void
 update_global_event_hook(rb_event_flag_t vm_events)
 {
     rb_event_flag_t new_iseq_events = vm_events & ISEQ_TRACE_EVENTS;
-    rb_event_flag_t cur_iseq_events = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
+    rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS;
 
-    if (new_iseq_events > cur_iseq_events) {
+    if (new_iseq_events & ~enabled_iseq_events) {
         /* write all ISeqs iff new events are added */
-        rb_iseq_trace_set_all(new_iseq_events);
+        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
     }
 
     ruby_vm_event_flags = vm_events;
+    ruby_vm_event_enabled_flags |= vm_events;
     rb_objspace_set_event_hook(vm_events);
 }
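
A worked example of what the bitmask test catches that the numeric
comparison does not, using the public event bit values (RUBY_EVENT_LINE
== 0x01, RUBY_EVENT_CLASS == 0x02, RUBY_EVENT_CALL == 0x08); a
compilable sketch, not VM code:

    #include <assert.h>

    int
    main(void)
    {
        unsigned int enabled   = 0x08;  /* CALL enabled earlier */
        unsigned int requested = 0x03;  /* now LINE | CLASS */

        /* old heuristic: "numerically larger" misses the two new events */
        assert(!(requested > enabled));

        /* new test: any bit outside the sticky set triggers the rewrite */
        assert(requested & ~enabled);
        return 0;
    }

The numeric comparison only detects growth when the new flag word happens
to be larger; the set-difference test catches any newly requested event.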