Avoid generating opt_send with cfunc cc with JIT

only for opt_nil_p and opt_not.

While vm_method_cfunc_is is used for opt_eq too, many of opt_eq's fast
paths don't call it. So if the call cache is populated, opt_eq should
still generate opt_send, regardless of whether the cached method is a
cfunc. And again, opt_neq isn't relevant here because of the difference
in operands. So opt_nil_p and opt_not are the only variants whose fast
paths rely on vm_method_cfunc_is in this way.
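
For intuition, here is a minimal, self-contained sketch (hypothetical names, not the real vm_insnhelper.c) of the fast path that a cached cfunc gives opt_nil_p and opt_not, which is exactly what emitting opt_send_without_block would throw away:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of the interpreter's situation; none of these
 * names are Ruby's actual internals. */
typedef bool (*cfunc_t)(long recv);

static bool default_nil_p(long recv) { return recv == 0; } /* stand-in for the built-in #nil? cfunc */

struct call_cache { cfunc_t cached; };                      /* stand-in for a populated cc */

/* Like vm_method_cfunc_is: "does the cache still point at the expected C function?" */
static bool method_cfunc_is(const struct call_cache *cc, cfunc_t expected)
{
    return cc->cached == expected;
}

/* opt_nil_p's fast path: when the cached method is still the default cfunc,
 * answer inline; a JIT-generated opt_send_without_block would only add a full
 * method dispatch on top of this. */
static bool opt_nil_p(const struct call_cache *cc, long recv)
{
    if (method_cfunc_is(cc, default_nil_p)) return recv == 0; /* fast path */
    return cc->cached(recv);                                  /* fallback: real dispatch */
}

int main(void)
{
    struct call_cache cc = { default_nil_p };
    printf("%d %d\n", opt_nil_p(&cc, 0), opt_nil_p(&cc, 42)); /* prints "1 0" */
    return 0;
}
```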

```
$ benchmark-driver -v --rbenv 'before2 --jit::ruby --jit;before --jit;after --jit' benchmark/mjit_opt_cc_insns.yml --repeat-count=4
before2 --jit: ruby 2.8.0dev (2020-06-22T08:37:37Z master 3238641750) +JIT [x86_64-linux]
before --jit: ruby 2.8.0dev (2020-06-23T01:01:24Z master 9ce2066209) +JIT [x86_64-linux]
after --jit: ruby 2.8.0dev (2020-06-23T06:58:37Z master 17e9df3157) +JIT [x86_64-linux]
last_commit=Avoid generating opt_send with cfunc cc with JIT
Calculating -------------------------------------
                     before2 --jit  before --jit  after --jit
        mjit_nil?(1)       54.204M       75.536M      75.031M i/s -     40.000M times in 0.737947s 0.529548s 0.533110s
         mjit_not(1)       53.822M       70.921M      71.920M i/s -     40.000M times in 0.743195s 0.564007s 0.556171s
     mjit_eq(1, nil)        7.367M        6.496M       7.331M i/s -      8.000M times in 1.085882s 1.231470s 1.091327s

Comparison:
                     mjit_nil?(1)
        before --jit:  75536059.3 i/s
         after --jit:  75031409.4 i/s - 1.01x  slower
       before2 --jit:  54204431.6 i/s - 1.39x  slower

                      mjit_not(1)
         after --jit:  71920324.1 i/s
        before --jit:  70921063.1 i/s - 1.01x  slower
       before2 --jit:  53821697.6 i/s - 1.34x  slower

                  mjit_eq(1, nil)
       before2 --jit:   7367280.0 i/s
         after --jit:   7330527.4 i/s - 1.01x  slower
        before --jit:   6496302.8 i/s - 1.13x  slower
```
Takashi Kokubun 2020-06-22 23:30:37 -07:00
Parent 6aa3aaac05
Commit 37a2e48d76
No key found matching this signature
GPG key ID: 6FFC433B12EE23DD
3 changed files with 22 additions and 7 deletions

View file

```diff
@@ -17,6 +17,7 @@
 #include "internal.h"
 #include "internal/compile.h"
 #include "internal/hash.h"
+#include "internal/object.h"
 #include "internal/variable.h"
 #include "mjit.h"
 #include "vm_core.h"
@@ -97,9 +98,16 @@ captured_cc_entries(const struct compile_status *status)
 
 // Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available.
 static bool
-has_valid_method_type(CALL_CACHE cc, rb_method_type_t type)
+has_valid_method_type(CALL_CACHE cc)
 {
-    return vm_cc_cme(cc) != NULL && vm_cc_cme(cc)->def->type == type;
+    return vm_cc_cme(cc) != NULL;
+}
+
+// Returns true if MJIT thinks this cc's opt_* insn may fallback to opt_send_without_block.
+static bool
+has_cache_for_send(CALL_CACHE cc, bool cfunc_cached)
+{
+    return has_valid_method_type(cc) && (!cfunc_cached || vm_cc_cme(cc)->def->type != VM_METHOD_TYPE_CFUNC);
 }
 
 // Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition
@@ -439,8 +447,9 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
        const struct rb_callcache *cc = captured_cc_entries(status)[call_data_index(cd, body)]; // use copy to avoid race condition
        const rb_iseq_t *child_iseq;
-       if (has_valid_method_type(cc, VM_METHOD_TYPE_ISEQ) &&
+       if (has_valid_method_type(cc) &&
            !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
+           vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ &&
            fastpath_applied_iseq_p(ci, cc, child_iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) &&
            // CC_SET_FASTPATH in vm_callee_setup_arg
            inlinable_iseq_p(child_iseq->body)) {
```

View file

```diff
@@ -20,13 +20,13 @@
     const CALL_INFO ci = cd->ci;
     int kw_splat = IS_ARGS_KW_SPLAT(ci) > 0;
     extern bool rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci);
-    if (!status->compile_info->disable_send_cache && (
+    if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc) && (
 % # `CC_SET_FASTPATH(cd->cc, vm_call_cfunc_with_frame, ...)` in `vm_call_cfunc`
-           (has_valid_method_type(captured_cc, VM_METHOD_TYPE_CFUNC)
+           (vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_CFUNC
             && !rb_splat_or_kwargs_p(ci) && !kw_splat)
 % # `CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(...), vm_call_iseq_optimizable_p(...))` in `vm_callee_setup_arg`,
 % # and support only non-VM_CALL_TAILCALL path inside it
-        || (has_valid_method_type(captured_cc, VM_METHOD_TYPE_ISEQ)
+        || (vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_ISEQ
            && fastpath_applied_iseq_p(ci, captured_cc, iseq = def_iseq_ptr(vm_cc_cme(captured_cc)->def))
            && !(vm_ci_flag(ci) & VM_CALL_TAILCALL))
    )) {
```

View file

```diff
@@ -29,6 +29,12 @@
 %   insn.expr.expr.lines.any? { |l| l.match(/\A\s+CALL_SIMPLE_METHOD\(\);\s+\z/) }
 % end.map(&:name)
 %
+% # These insns cache cfunc in cc under optimized circumstances. They don't generate opt_send when cfunc is cached.
+% cfunc_insns = [
+%   'opt_nil_p',
+%   'opt_not',
+% ]
+%
 % # Available variables and macros in JIT-ed function:
 % #   ec: the first argument of _mjitXXX
 % #   reg_cfp: the second argument of _mjitXXX
@@ -56,7 +62,7 @@ switch (insn) {
 % when *send_compatible_opt_insns
 %   # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated.
 %   cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' }
-    if (has_valid_method_type(captured_cc_entries(status)[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)], VM_METHOD_TYPE_ISEQ)) {
+    if (has_cache_for_send(captured_cc_entries(status)[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)], <%= cfunc_insns.include?(insn.name) %>)) {
 <%= render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%>
 <%= render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%>
         break;
```
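
Reading the last two hunks together: `<%= cfunc_insns.include?(insn.name) %>` expands to a compile-time `true` only for opt_nil_p and opt_not, and has_cache_for_send then declines to emit opt_send_without_block when those insns already have a cfunc cached. A self-contained sketch of that truth table, using stand-in types rather than the real CALL_CACHE:

```c
#include <assert.h>
#include <stdbool.h>

/* Stand-ins for the real structures; only what the sketch needs. */
enum method_type { TYPE_ISEQ, TYPE_CFUNC };
struct fake_cc { bool populated; enum method_type type; };

/* Mirrors has_cache_for_send from the first hunk, over the fake cc. */
static bool has_cache_for_send(const struct fake_cc *cc, bool cfunc_cached)
{
    return cc->populated && (!cfunc_cached || cc->type != TYPE_CFUNC);
}

int main(void)
{
    struct fake_cc empty = { false, TYPE_ISEQ };
    struct fake_cc iseq  = { true,  TYPE_ISEQ };
    struct fake_cc cfunc = { true,  TYPE_CFUNC };

    /* opt_nil_p / opt_not: the template passes true */
    assert(!has_cache_for_send(&empty, true));   /* empty cache: keep the opt_* insn */
    assert( has_cache_for_send(&iseq,  true));   /* nil?/! redefined as a Ruby method: emit send */
    assert(!has_cache_for_send(&cfunc, true));   /* cached cfunc: keep the opt_* fast path */

    /* opt_eq and the other send-compatible insns: the template passes false */
    assert( has_cache_for_send(&cfunc, false));  /* any populated cache emits send */
    return 0;
}
```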