зеркало из https://github.com/github/ruby.git
Use builtin_inline_p to avoid pushing a frame for primitive C methods (#63)
* Use builtin_inline_p to skip a frame of C methods * Fix bugs in primitive cfunc call code * Remove if (push_frame) {} * Remove if (push_frame) {} * Push Aaron's fix to avoid hardcoding insn lengths Co-authored-by: Takashi Kokubun <takashikkbn@gmail.com>
This commit is contained in:
Родитель
9f46e6e64b
Коммит
860589c7fa
|
@ -2253,23 +2253,18 @@ gen_send_cfunc(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const
|
|||
//print_str(cb, "recv");
|
||||
//print_ptr(cb, recv);
|
||||
|
||||
// If this function needs a Ruby stack frame
|
||||
const bool push_frame = cfunc_needs_frame(cfunc);
|
||||
|
||||
// Create a side-exit to fall back to the interpreter
|
||||
uint8_t *side_exit = yjit_side_exit(jit, ctx);
|
||||
|
||||
// Check for interrupts
|
||||
yjit_check_ints(cb, side_exit);
|
||||
|
||||
if (push_frame) {
|
||||
// Stack overflow check
|
||||
// #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
|
||||
// REG_CFP <= REG_SP + 4 * sizeof(VALUE) + sizeof(rb_control_frame_t)
|
||||
lea(cb, REG0, ctx_sp_opnd(ctx, sizeof(VALUE) * 4 + sizeof(rb_control_frame_t)));
|
||||
cmp(cb, REG_CFP, REG0);
|
||||
jle_ptr(cb, COUNTED_EXIT(side_exit, send_se_cf_overflow));
|
||||
}
|
||||
|
||||
// Points to the receiver operand on the stack
|
||||
x86opnd_t recv = ctx_stack_opnd(ctx, argc);
|
||||
|
@ -2277,7 +2272,6 @@ gen_send_cfunc(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const
|
|||
// Store incremented PC into current control frame in case callee raises.
|
||||
jit_save_pc(jit, REG0);
|
||||
|
||||
if (push_frame) {
|
||||
if (block) {
|
||||
// Change cfp->block_code in the current frame. See vm_caller_setup_arg_block().
|
||||
// VM_CFP_TO_CAPTURED_BLOCK does &cfp->self, rb_captured_block->code.iseq aliases
|
||||
|
@ -2341,7 +2335,6 @@ gen_send_cfunc(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const
|
|||
mov(cb, member_opnd(REG1, rb_control_frame_t, ep), REG0);
|
||||
mov(cb, REG0, recv);
|
||||
mov(cb, member_opnd(REG1, rb_control_frame_t, self), REG0);
|
||||
}
|
||||
|
||||
// Verify that we are calling the right function
|
||||
if (YJIT_CHECK_MODE > 0) {
|
||||
|
@ -2407,15 +2400,12 @@ gen_send_cfunc(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const
|
|||
x86opnd_t stack_ret = ctx_stack_push(ctx, TYPE_UNKNOWN);
|
||||
mov(cb, stack_ret, RAX);
|
||||
|
||||
// If this function needs a Ruby stack frame
|
||||
if (push_frame) {
|
||||
// Pop the stack frame (ec->cfp++)
|
||||
add(
|
||||
cb,
|
||||
member_opnd(REG_EC, rb_execution_context_t, cfp),
|
||||
imm_opnd(sizeof(rb_control_frame_t))
|
||||
);
|
||||
}
|
||||
|
||||
// Note: gen_oswb_iseq() jumps to the next instruction with ctx->sp_offset == 0
|
||||
// after the call, while this does not. This difference prevents
|
||||
|
@ -2463,6 +2453,30 @@ iseq_lead_only_arg_setup_p(const rb_iseq_t *iseq)
|
|||
bool rb_iseq_only_optparam_p(const rb_iseq_t *iseq);
|
||||
bool rb_iseq_only_kwparam_p(const rb_iseq_t *iseq);
|
||||
|
||||
// If true, the iseq is leaf and it can be replaced by a single C call.
|
||||
static bool
|
||||
rb_leaf_invokebuiltin_iseq_p(const rb_iseq_t *iseq)
|
||||
{
|
||||
unsigned int invokebuiltin_len = insn_len(BIN(opt_invokebuiltin_delegate_leave));
|
||||
unsigned int leave_len = insn_len(BIN(leave));
|
||||
|
||||
return iseq->body->iseq_size == (
|
||||
(invokebuiltin_len + leave_len) &&
|
||||
rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[0]) == BIN(opt_invokebuiltin_delegate_leave) &&
|
||||
rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[invokebuiltin_len]) == BIN(leave) &&
|
||||
iseq->body->builtin_inline_p
|
||||
);
|
||||
}
|
||||
|
||||
// Return an rb_builtin_function if the iseq contains only that leaf builtin function.
|
||||
static const struct rb_builtin_function*
|
||||
rb_leaf_builtin_function(const rb_iseq_t *iseq)
|
||||
{
|
||||
if (!rb_leaf_invokebuiltin_iseq_p(iseq))
|
||||
return NULL;
|
||||
return (const struct rb_builtin_function *)iseq->body->iseq_encoded[1];
|
||||
}
|
||||
|
||||
static codegen_status_t
|
||||
gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const rb_callable_method_entry_t *cme, rb_iseq_t *block, const int32_t argc)
|
||||
{
|
||||
|
@ -2529,6 +2543,39 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
|
|||
// Check for interrupts
|
||||
yjit_check_ints(cb, side_exit);
|
||||
|
||||
const struct rb_builtin_function *leaf_builtin = rb_leaf_builtin_function(iseq);
|
||||
|
||||
if (leaf_builtin && !block && leaf_builtin->argc + 1 <= NUM_C_ARG_REGS) {
|
||||
// TODO: figure out if this is necessary
|
||||
// If the calls don't allocate, do they need up to date PC, SP?
|
||||
// Save YJIT registers
|
||||
yjit_save_regs(cb);
|
||||
|
||||
// Get a pointer to the top of the stack
|
||||
lea(cb, REG0, ctx_stack_opnd(ctx, 0));
|
||||
|
||||
// Call the builtin func (ec, recv, arg1, arg2, ...)
|
||||
mov(cb, C_ARG_REGS[0], REG_EC);
|
||||
|
||||
// Copy self and arguments
|
||||
for (int32_t i = 0; i < leaf_builtin->argc + 1; i++) {
|
||||
x86opnd_t stack_opnd = mem_opnd(64, REG0, -(leaf_builtin->argc - i) * SIZEOF_VALUE);
|
||||
x86opnd_t c_arg_reg = C_ARG_REGS[i + 1];
|
||||
mov(cb, c_arg_reg, stack_opnd);
|
||||
}
|
||||
ctx_stack_pop(ctx, leaf_builtin->argc + 1);
|
||||
call_ptr(cb, REG0, (void *)leaf_builtin->func_ptr);
|
||||
|
||||
// Load YJIT registers
|
||||
yjit_load_regs(cb);
|
||||
|
||||
// Push the return value
|
||||
x86opnd_t stack_ret = ctx_stack_push(ctx, TYPE_UNKNOWN);
|
||||
mov(cb, stack_ret, RAX);
|
||||
|
||||
return YJIT_KEEP_COMPILING;
|
||||
}
|
||||
|
||||
// Stack overflow check
|
||||
// #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
|
||||
ADD_COMMENT(cb, "stack overflow check");
|
||||
|
|
13
yjit_iface.c
13
yjit_iface.c
|
@ -126,19 +126,6 @@ check_cfunc_dispatch(VALUE receiver, struct rb_callinfo *ci, void *callee, rb_ca
|
|||
|
||||
MJIT_FUNC_EXPORTED VALUE rb_hash_has_key(VALUE hash, VALUE key);
|
||||
|
||||
bool
|
||||
cfunc_needs_frame(const rb_method_cfunc_t *cfunc)
|
||||
{
|
||||
void* fptr = (void*)cfunc->func;
|
||||
|
||||
// Leaf C functions do not need a stack frame
|
||||
// or a stack overflow check
|
||||
return !(
|
||||
// Hash#key?
|
||||
fptr == (void*)rb_hash_has_key
|
||||
);
|
||||
}
|
||||
|
||||
// GC root for interacting with the GC
|
||||
struct yjit_root_struct {
|
||||
int unused; // empty structs are not legal in C99
|
||||
|
|
|
@ -100,7 +100,6 @@ VALUE *yjit_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx);
|
|||
int yjit_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc);
|
||||
|
||||
void check_cfunc_dispatch(VALUE receiver, struct rb_callinfo *ci, void *callee, rb_callable_method_entry_t *compile_time_cme);
|
||||
bool cfunc_needs_frame(const rb_method_cfunc_t *cfunc);
|
||||
|
||||
RBIMPL_ATTR_NODISCARD() bool assume_bop_not_redefined(block_t *block, int redefined_flag, enum ruby_basic_operators bop);
|
||||
void assume_method_lookup_stable(VALUE receiver_klass, const rb_callable_method_entry_t *cme, block_t *block);
|
||||
|
|
Загрузка…
Ссылка в новой задаче