Port over putnil, putobject, and gen_leave()

* Remove x86-64 dependency from codegen.rs

* Port over putnil and putobject

* Port over gen_leave()

* Complete port of gen_leave()

* Fix bug in x86 instruction splitting
Maxime Chevalier-Boisvert 2022-06-09 16:29:55 -04:00 committed by Takashi Kokubun
Parent d75c346c1c
Commit 0000984fed
No key found matching this signature
GPG key ID: 6FFC433B12EE23DD
4 changed files with 168 additions and 221 deletions

View file

@@ -245,7 +245,7 @@ pub const R13B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::
pub const R14B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 14 });
pub const R15B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 15 });
// C argument registers
// C argument registers on this platform
pub const C_ARG_REGS: [X86Opnd; 6] = [RDI, RSI, RDX, RCX, R8, R9];
//===========================================================================

View file

@@ -6,7 +6,7 @@ use std::fmt;
use std::convert::From;
use crate::cruby::{VALUE};
use crate::virtualmem::{CodePtr};
use crate::asm::{CodeBlock};
use crate::asm::{CodeBlock, uimm_num_bits, imm_num_bits};
use crate::asm::x86_64::{X86Opnd, X86Imm, X86UImm, X86Reg, X86Mem, RegType};
use crate::core::{Context, Type, TempMapping};
use crate::codegen::{JITState};
@@ -21,6 +21,9 @@ pub const EC: Opnd = _EC;
pub const CFP: Opnd = _CFP;
pub const SP: Opnd = _SP;
pub const C_ARG_OPNDS: [Opnd; 6] = _C_ARG_OPNDS;
pub const C_RET_OPND: Opnd = _C_RET_OPND;
/// Instruction opcodes
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Op
@@ -77,6 +80,9 @@ pub enum Op
// Compare two operands
Cmp,
// Unconditional jump which takes an address operand
JmpOpnd,
// Low-level conditional jump instructions
Jbe,
Je,
@@ -92,110 +98,8 @@ pub enum Op
// C function return
CRet,
/*
// The following are conditional jump instructions. They all accept as their
// first operand an EIR_LABEL_NAME, which is used as the target of the jump.
//
// The OP_JUMP_EQ instruction accepts two additional operands, to be
// compared for equality. If they're equal, then the generated code jumps to
// the target label. If they're not, then it continues on to the next
// instruction.
JumpEq,
// The OP_JUMP_NE instruction is very similar to the OP_JUMP_EQ instruction,
// except it compares for inequality instead.
JumpNe,
// Checks the overflow flag and conditionally jumps to the target if it is
// currently set.
JumpOvf,
// A low-level call instruction for calling a function by a pointer. It
// accepts one operand of type EIR_IMM that should be a pointer to the
// function. Usually this is done by first casting the function to a void*,
// as in: ir_const_ptr((void *)&my_function).
Call,
// Calls a function by a pointer and returns an operand that contains the
// result of the function. Accepts as its operands a pointer to a function
// of type EIR_IMM (usually generated from ir_const_ptr) and a variable
// number of arguments to the function being called.
//
// This is the higher-level instruction that should be used when you want to
// call a function with arguments, as opposed to OP_CALL which is
// lower-level and just calls a function without moving arguments into
// registers for you.
CCall,
// Returns from the function being generated immediately. This is different
// from OP_RETVAL in that it does nothing with the return value register
// (whatever is in there is what will get returned). Accepts no operands.
Ret,
// First, moves a value into the return value register. Then, returns from
// the generated function. Accepts as its only operand the value that should
// be returned from the generated function.
RetVal,
// A conditional move instruction that should be preceded at some point by
// an OP_CMP instruction that would have set the requisite comparison flags.
// Accepts 2 operands, both of which are expected to be of the EIR_REG type.
//
// If the comparison indicates the left compared value is greater than or
// equal to the right compared value, then the conditional move is executed,
// otherwise we just continue on to the next instruction.
//
// This is considered a low-level instruction, and the OP_SELECT_* variants
// should be preferred if possible.
CMovGE,
// The same as OP_CMOV_GE, except the comparison is greater than.
CMovGT,
// The same as OP_CMOV_GE, except the comparison is less than or equal.
CMovLE,
// The same as OP_CMOV_GE, except the comparison is less than.
CMovLT,
// Selects between two different values based on a comparison of two other
// values. Accepts 4 operands. The first two are the basis of the
// comparison. The second two are the "then" case and the "else" case. You
// can effectively think of this instruction as a ternary operation, where
// the first two values are being compared.
//
// OP_SELECT_GE performs the described ternary using a greater than or equal
// comparison, that is if the first operand is greater than or equal to the
// second operand.
SelectGE,
// The same as OP_SELECT_GE, except the comparison is greater than.
SelectGT,
// The same as OP_SELECT_GE, except the comparison is less than or equal.
SelectLE,
// The same as OP_SELECT_GE, except the comparison is less than.
SelectLT,
// For later:
// These encode Ruby true/false semantics
// Can be used to enable op fusion of Ruby compare + branch.
// OP_JUMP_TRUE, // (opnd, target)
// OP_JUMP_FALSE, // (opnd, target)
// For later:
// OP_GUARD_HEAP, // (opnd, target)
// OP_GUARD_IMM, // (opnd, target)
// OP_GUARD_FIXNUM, // (opnd, target)
// For later:
// OP_COUNTER_INC, (counter_name)
// For later:
// OP_LEA,
// OP_TEST,
*/
// Trigger a debugger breakpoint
Breakpoint,
}
// Memory location
@@ -256,6 +160,12 @@ impl Opnd
}
}
impl From<usize> for Opnd {
fn from(value: usize) -> Self {
Opnd::UImm(value.try_into().unwrap())
}
}
impl From<VALUE> for Opnd {
fn from(value: VALUE) -> Self {
let VALUE(uimm) = value;
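
This From&lt;usize&gt; impl is what lets later call sites write RUBY_SIZEOF_CONTROL_FRAME.into() in gen_leave(). A minimal standalone sketch of the same pattern, with MiniOpnd as a hypothetical stand-in for the IR's Opnd:

#[derive(Debug, PartialEq)]
enum MiniOpnd {
    UImm(u64),
}

impl From<usize> for MiniOpnd {
    fn from(value: usize) -> Self {
        // usize -> u64 can't lose bits on 64-bit targets, but
        // try_into().unwrap() keeps the conversion explicit
        MiniOpnd::UImm(value.try_into().unwrap())
    }
}

fn main() {
    let opnd: MiniOpnd = 1024usize.into();
    assert_eq!(opnd, MiniOpnd::UImm(1024));
}
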
@@ -522,6 +432,18 @@ impl Assembler
let opnd1 = asm.load(opnds[1]);
asm.push_insn(op, vec![opnds[0], opnd1], None);
},
[Opnd::Mem(_), Opnd::UImm(val)] => {
if uimm_num_bits(*val) > 32 {
let opnd1 = asm.load(opnds[1]);
asm.push_insn(op, vec![opnds[0], opnd1], None);
}
else
{
asm.push_insn(op, opnds, target);
}
},
_ => {
asm.push_insn(op, opnds, target);
}
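
The new match arm handles a real x86-64 encoding limit: mov to a memory destination can carry at most a 32-bit immediate, so a wider unsigned value has to be loaded into a register first. A standalone sketch of the decision, where this uimm_num_bits is a hypothetical mirror of the helper imported above:

fn uimm_num_bits(uimm: u64) -> u8 {
    // smallest standard operand width that can hold the value
    if uimm <= u8::MAX as u64 {
        8
    } else if uimm <= u16::MAX as u64 {
        16
    } else if uimm <= u32::MAX as u64 {
        32
    } else {
        64
    }
}

fn main() {
    // fits in 32 bits: the store can keep its immediate form
    assert!(uimm_num_bits(1) <= 32);
    // needs 64 bits: split into a register load plus a store
    assert!(uimm_num_bits(0xdead_beef_cafe) > 32);
}
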
@@ -609,7 +531,7 @@ impl Assembler
// C return values need to be mapped to the C return register
if op == Op::CCall {
out_reg = Opnd::Reg(take_reg(&mut pool, &regs, &RET_REG))
out_reg = Opnd::Reg(take_reg(&mut pool, &regs, &C_RET_REG))
}
// If this instruction's first operand maps to a register and
@@ -689,6 +611,18 @@ macro_rules! def_push_jcc {
};
}
macro_rules! def_push_0_opnd_no_out {
($op_name:ident, $opcode:expr) => {
impl Assembler
{
pub fn $op_name(&mut self)
{
self.push_insn($opcode, vec![], None);
}
}
};
}
macro_rules! def_push_1_opnd {
($op_name:ident, $opcode:expr) => {
impl Assembler
@@ -737,6 +671,7 @@ macro_rules! def_push_2_opnd_no_out {
};
}
def_push_1_opnd_no_out!(jmp_opnd, Op::JmpOpnd);
def_push_jcc!(je, Op::Je);
def_push_jcc!(jbe, Op::Jbe);
def_push_jcc!(jnz, Op::Jnz);
@@ -752,6 +687,7 @@ def_push_2_opnd_no_out!(store, Op::Store);
def_push_2_opnd_no_out!(mov, Op::Mov);
def_push_2_opnd_no_out!(cmp, Op::Cmp);
def_push_2_opnd_no_out!(test, Op::Test);
def_push_0_opnd_no_out!(breakpoint, Op::Breakpoint);
// NOTE: these methods are temporary and will likely move
// to context.rs later
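
def_push_0_opnd_no_out rounds out the existing family of helper-generating macros (jcc, one-operand, two-operand) for opcodes that take no operands and produce no output, and breakpoint() is its first user. A self-contained sketch of what the expansion provides, with MiniAsm as a hypothetical stand-in for Assembler:

struct MiniAsm {
    insns: Vec<&'static str>,
}

macro_rules! def_push_0_opnd_no_out {
    ($op_name:ident, $opcode:expr) => {
        impl MiniAsm {
            pub fn $op_name(&mut self) {
                // push_insn(opcode, vec![], None) in the real IR
                self.insns.push($opcode);
            }
        }
    };
}

def_push_0_opnd_no_out!(breakpoint, "Breakpoint");

fn main() {
    let mut asm = MiniAsm { insns: vec![] };
    asm.breakpoint(); // reads like the other asm.* helpers
    assert_eq!(asm.insns, ["Breakpoint"]);
}
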

View file

@@ -6,7 +6,7 @@ use crate::asm::{CodeBlock};
use crate::asm::x86_64::*;
use crate::codegen::{JITState};
use crate::cruby::*;
use crate::backend::ir::*;
use crate::backend::ir::{Assembler, Opnd, Target, Op, Mem};
// Use the x86 register type for this platform
pub type Reg = X86Reg;
@@ -16,8 +16,19 @@ pub const _CFP: Opnd = Opnd::Reg(R13_REG);
pub const _EC: Opnd = Opnd::Reg(R12_REG);
pub const _SP: Opnd = Opnd::Reg(RBX_REG);
// C argument registers on this platform
pub const _C_ARG_OPNDS: [Opnd; 6] = [
Opnd::Reg(RDI_REG),
Opnd::Reg(RSI_REG),
Opnd::Reg(RDX_REG),
Opnd::Reg(RCX_REG),
Opnd::Reg(R8_REG),
Opnd::Reg(R9_REG)
];
// C return value register on this platform
pub const RET_REG: Reg = RAX_REG;
pub const C_RET_REG: Reg = RAX_REG;
pub const _C_RET_OPND: Opnd = Opnd::Reg(RAX_REG);
/// Map Opnd to X86Opnd
impl From<Opnd> for X86Opnd {
@@ -58,7 +69,7 @@ impl Assembler
]
}
/// Emit platform-specific machine code
/// Split IR instructions for the x86 platform
fn x86_split(mut self) -> Assembler
{
let live_ranges: Vec<usize> = std::mem::take(&mut self.live_ranges);
@@ -76,7 +87,8 @@ impl Assembler
}
},
[Opnd::Mem(_), _] => {
// We have to load memory and register operands to avoid corrupting them
[Opnd::Mem(_) | Opnd::Reg(_), _] => {
let opnd0 = asm.load(opnds[0]);
asm.push_insn(op, vec![opnd0, opnds[1]], None);
return;
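
This widened pattern is the "fix bug in x86 instruction splitting" part of the commit: an instruction like Add writes its result to its first operand, so a fixed register such as CFP, or a memory location, must first be copied into a fresh output register or it would be clobbered in place. A minimal sketch of the rewrite, with hypothetical types:

#[derive(Clone, Copy, Debug, PartialEq)]
enum MOpnd {
    Reg(u8),    // a fixed register like CFP
    Mem(i32),   // a memory location
    Out(usize), // the output of a previous instruction
}

fn split_add(opnds: [MOpnd; 2], prog: &mut Vec<(&'static str, Vec<MOpnd>)>) {
    match opnds {
        // Mem or Reg destinations would be overwritten in place,
        // so load them into a fresh output register first
        [MOpnd::Mem(_) | MOpnd::Reg(_), src] => {
            prog.push(("load", vec![opnds[0]]));
            let tmp = MOpnd::Out(prog.len() - 1);
            prog.push(("add", vec![tmp, src]));
        }
        _ => prog.push(("add", opnds.to_vec())),
    }
}

fn main() {
    let mut prog = vec![];
    split_add([MOpnd::Reg(13), MOpnd::Mem(0)], &mut prog);
    assert_eq!(prog.len(), 2); // load + add, source register left intact
}
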
@@ -154,7 +166,7 @@ impl Assembler
Op::CRet => {
// TODO: bias allocation towards return register
if insn.opnds[0] != Opnd::Reg(RET_REG) {
if insn.opnds[0] != Opnd::Reg(C_RET_REG) {
mov(cb, RAX, insn.opnds[0].into());
}
@@ -167,17 +179,11 @@ impl Assembler
// Test and set flags
Op::Test => test(cb, insn.opnds[0].into(), insn.opnds[1].into()),
Op::Je => {
match insn.target.unwrap() {
Target::Label(idx) => {
dbg!(idx);
je_label(cb, idx);
},
_ => unimplemented!()
}
}
Op::JmpOpnd => jmp_rm(cb, insn.opnds[0].into()),
Op::Je => je_label(cb, insn.target.unwrap().unwrap_label_idx()),
Op::Breakpoint => int3(cb),
_ => panic!("unsupported instruction passed to x86 backend: {:?}", insn.op)
};

View file

@@ -1,7 +1,7 @@
// We use the YARV bytecode constants which have a CRuby-style name
#![allow(non_upper_case_globals)]
use crate::asm::x86_64::*;
//use crate::asm::x86_64::*;
use crate::asm::*;
use crate::backend::ir::*;
use crate::core::*;
@@ -25,15 +25,15 @@ use std::slice;
pub use crate::virtualmem::CodePtr;
// Callee-saved registers
pub const REG_CFP: X86Opnd = R13;
pub const REG_EC: X86Opnd = R12;
pub const REG_SP: X86Opnd = RBX;
//pub const REG_CFP: X86Opnd = R13;
//pub const REG_EC: X86Opnd = R12;
//pub const REG_SP: X86Opnd = RBX;
// Scratch registers used by YJIT
pub const REG0: X86Opnd = RAX;
pub const REG0_32: X86Opnd = EAX;
pub const REG0_8: X86Opnd = AL;
pub const REG1: X86Opnd = RCX;
//pub const REG0: X86Opnd = RAX;
//pub const REG0_32: X86Opnd = EAX;
//pub const REG0_8: X86Opnd = AL;
//pub const REG1: X86Opnd = RCX;
// A block that can be invalidated needs space to write a jump.
// We'll reserve a minimum size for any block that could
@@ -210,19 +210,25 @@ fn add_comment(cb: &mut CodeBlock, comment_str: &str) {
/// Increment a profiling counter with counter_name
#[cfg(not(feature = "stats"))]
macro_rules! gen_counter_incr {
($cb:tt, $counter_name:ident) => {};
($asm:tt, $counter_name:ident) => {};
}
#[cfg(feature = "stats")]
macro_rules! gen_counter_incr {
($cb:tt, $counter_name:ident) => {
($asm:tt, $counter_name:ident) => {
if (get_option!(gen_stats)) {
// Get a pointer to the counter variable
let ptr = ptr_to_counter!($counter_name);
// Use REG1 because there might be return value in REG0
mov($cb, REG1, const_ptr_opnd(ptr as *const u8));
write_lock_prefix($cb); // for ractors.
add($cb, mem_opnd(64, REG1, 0), imm_opnd(1));
// Load the pointer into a register
let ptr_reg = $asm.load(Opnd::const_ptr(ptr as *const u8));
let counter_opnd = Opnd::mem(64, ptr_reg, 0);
// FIXME: do we want an atomic add, or an atomic store or swap for arm?
//write_lock_prefix($cb); // for ractors.
// Increment and store the updated value
let incr_opnd = $asm.add(counter_opnd, 1.into());
$asm.store(counter_opnd, incr_opnd);
}
};
}
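
The FIXME is about the dropped lock prefix: the ported sequence is a plain load/add/store, which can lose increments when Ractors race on the same counter. A sketch of the difference in plain Rust, not YJIT code:

use std::sync::atomic::{AtomicU64, Ordering};

fn incr_racy(counter: &mut u64) {
    // what the ported IR emits: load, add, store
    *counter += 1;
}

fn incr_atomic(counter: &AtomicU64) {
    // what a lock-prefixed add (or an ARM atomic RMW) would give
    counter.fetch_add(1, Ordering::Relaxed);
}

fn main() {
    let mut c = 0u64;
    incr_racy(&mut c);
    let a = AtomicU64::new(0);
    incr_atomic(&a);
    assert_eq!(c + a.load(Ordering::Relaxed), 2);
}
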
@@ -292,8 +298,7 @@ fn gen_save_sp(jit: &JITState, asm: &mut Assembler, ctx: &mut Context) {
fn jit_prepare_routine_call(
jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
scratch_reg: X86Opnd,
asm: &mut Assembler
) {
jit.record_boundary_patch_point = true;
jit_save_pc(jit, asm);
@@ -396,6 +401,9 @@ fn verify_ctx(jit: &JITState, ctx: &Context) {
fn gen_exit(exit_pc: *mut VALUE, ctx: &Context, cb: &mut CodeBlock) -> CodePtr {
let code_ptr = cb.get_write_ptr();
todo!();
/*
add_comment(cb, "exit to interpreter");
// Generate the code to exit to the interpreter
@@ -432,6 +440,7 @@ fn gen_exit(exit_pc: *mut VALUE, ctx: &Context, cb: &mut CodeBlock) -> CodePtr {
ret(cb);
return code_ptr;
*/
}
// Fill code_for_exit_from_stub. This is used by branch_stub_hit() to exit
@@ -442,16 +451,20 @@ fn gen_code_for_exit_from_stub(ocb: &mut OutlinedCb) -> CodePtr {
let ocb = ocb.unwrap();
let code_ptr = ocb.get_write_ptr();
todo!();
/*
gen_counter_incr!(ocb, exit_from_branch_stub);
pop(ocb, REG_SP);
pop(ocb, REG_EC);
pop(ocb, REG_CFP);
cpop(ocb, REG_SP);
cpop(ocb, REG_EC);
cpop(ocb, REG_CFP);
mov(ocb, RAX, uimm_opnd(Qundef.into()));
ret(ocb);
return code_ptr;
*/
}
// :side-exit:
@@ -504,6 +517,9 @@ fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> CodePtr {
let cb = ocb.unwrap();
let code_ptr = cb.get_write_ptr();
todo!();
/*
// This chunk of code expects REG_EC to be filled properly and
// RAX to contain the return value of the C method.
@@ -524,6 +540,7 @@ fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> CodePtr {
ret(cb);
return code_ptr;
*/
}
/// Generate a continuation for leave that exits to the interpreter at REG_CFP->pc.
@@ -531,6 +548,7 @@ fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> CodePtr {
fn gen_leave_exit(ocb: &mut OutlinedCb) -> CodePtr {
let ocb = ocb.unwrap();
let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// Note, gen_leave() fully reconstructs interpreter state and leaves the
// return value in RAX before coming here.
@@ -539,11 +557,22 @@ fn gen_leave_exit(ocb: &mut OutlinedCb) -> CodePtr {
// Every exit to the interpreter should be counted
//gen_counter_incr!(ocb, leave_interp_return);
pop(ocb, REG_SP);
pop(ocb, REG_EC);
pop(ocb, REG_CFP);
asm.cpop(SP);
asm.cpop(EC);
asm.cpop(CFP);
ret(ocb);
// FIXME: we're currently assuming that the return value is in RAX,
// left there by gen_leave() ...
//
// What are our options?
// We could put the return value in C_RET_REG?
// Then call asm.ret with C_RET_REG?
asm.cret(C_RET_OPND);
asm.compile(ocb);
return code_ptr;
}
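
The three cpops have to mirror the prologue's cpushes in reverse, since the C stack is LIFO; assuming the prologue pushes CFP, then EC, then SP (only the final cpush(SP) is visible below), the pop order SP, EC, CFP restores them. A tiny sketch of the invariant:

fn main() {
    let mut stack = Vec::new();
    // entry prologue: cpush CFP, then EC, then SP (assumed order)
    for reg in ["CFP", "EC", "SP"] {
        stack.push(reg);
    }
    // gen_leave_exit: cpop SP, then EC, then CFP
    for expected in ["SP", "EC", "CFP"] {
        assert_eq!(stack.pop(), Some(expected));
    }
}
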
@@ -604,8 +633,8 @@ pub fn gen_entry_prologue(cb: &mut CodeBlock, iseq: IseqPtr, insn_idx: u32) -> O
asm.cpush(SP);
// We are passed EC and CFP as arguments
asm.mov(EC, C_ARG_REGS[0].into());
asm.mov(CFP, C_ARG_REGS[1].into());
asm.mov(EC, C_ARG_OPNDS[0]);
asm.mov(CFP, C_ARG_OPNDS[1]);
// Load the current SP from the CFP into REG_SP
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
@@ -765,7 +794,7 @@ pub fn gen_single_block(
// :count-placement:
// Count bytecode instructions that execute in generated code.
// Note that the increment happens even when the output takes side exit.
gen_counter_incr!(cb, exec_instruction);
gen_counter_incr!(asm, exec_instruction);
// Add a comment for the name of the YARV instruction
asm.comment(&insn_name(opcode));
@@ -919,7 +948,7 @@ fn gen_swap(
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
stack_swap(jit, ctx, asm, 0, 1, REG0, REG1);
stack_swap(jit, ctx, asm, 0, 1);
KeepCompiling
}
@@ -929,8 +958,6 @@ fn stack_swap(
asm: &mut Assembler,
offset0: u16,
offset1: u16,
_reg0: X86Opnd,
_reg1: X86Opnd,
) {
let stack0_mem = ctx.ir_stack_opnd(offset0 as i32);
let stack1_mem = ctx.ir_stack_opnd(offset1 as i32);
@@ -947,56 +974,26 @@ fn stack_swap(
ctx.set_opnd_mapping(StackOpnd(offset1), mapping0);
}
/*
fn gen_putnil(
jit: &mut JITState,
ctx: &mut Context,
cb: &mut CodeBlock,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
jit_putobject(jit, ctx, cb, Qnil);
jit_putobject(jit, ctx, asm, Qnil);
KeepCompiling
}
fn jit_putobject(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock, arg: VALUE) {
fn jit_putobject(jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, arg: VALUE) {
let val_type: Type = Type::from(arg);
let stack_top = ctx.stack_push(val_type);
if arg.special_const_p() {
// Immediates will not move and do not need to be tracked for GC
// Thanks to this we can mov directly to memory when possible.
let imm = imm_opnd(arg.as_i64());
// 64-bit immediates can't be directly written to memory
if imm.num_bits() <= 32 {
mov(cb, stack_top, imm);
} else {
mov(cb, REG0, imm);
mov(cb, stack_top, REG0);
}
} else {
// Load the value to push into REG0
// Note that this value may get moved by the GC
jit_mov_gc_ptr(jit, cb, REG0, arg);
// Write argument at SP
mov(cb, stack_top, REG0);
}
asm.mov(stack_top, arg.into());
}
fn gen_putobject_int2fix(
jit: &mut JITState,
ctx: &mut Context,
cb: &mut CodeBlock,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let opcode = jit.opcode;
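
With the backend split in ir.rs making the wide-immediate decision, the whole 32-bit check from the old jit_putobject collapses into the single asm.mov() above. A rough sketch of what the backend ends up choosing for the two kinds of VALUE; the Qnil encoding and the pointer value are illustrative:

fn main() {
    // crude stand-in for uimm_num_bits()
    let bits = |val: u64| 64 - u64::leading_zeros(val).min(63);

    // special constant: Qnil is a small tagged immediate (0x08 on
    // CRuby at the time), so a single `mov [stack_top], imm` works
    let qnil: u64 = 0x08;

    // heap object: a full 64-bit pointer, so the backend splits it
    // into `mov tmp, imm64` followed by `mov [stack_top], tmp`
    let heap_val: u64 = 0x0000_7f8a_1c02_58e8;

    for val in [qnil, heap_val] {
        let action = if bits(val) > 32 { "load + store" } else { "store" };
        println!("{val:#018x} -> {action}");
    }
}
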
@@ -1006,22 +1003,23 @@ fn gen_putobject_int2fix(
1
};
jit_putobject(jit, ctx, cb, VALUE::fixnum_from_usize(cst_val));
jit_putobject(jit, ctx, asm, VALUE::fixnum_from_usize(cst_val));
KeepCompiling
}
fn gen_putobject(
jit: &mut JITState,
ctx: &mut Context,
cb: &mut CodeBlock,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let arg: VALUE = jit_get_arg(jit, 0);
jit_putobject(jit, ctx, cb, arg);
jit_putobject(jit, ctx, asm, arg);
KeepCompiling
}
/*
fn gen_putself(
_jit: &mut JITState,
ctx: &mut Context,
@@ -5250,48 +5248,53 @@ fn gen_invokesuper(
_ => unreachable!(),
}
}
*/
fn gen_leave(
jit: &mut JITState,
ctx: &mut Context,
cb: &mut CodeBlock,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Only the return value should be on the stack
assert!(ctx.get_stack_size() == 1);
// FIXME
/*
// Create a side-exit to fall back to the interpreter
let side_exit = get_side_exit(jit, ocb, ctx);
// Load environment pointer EP from CFP
mov(cb, REG1, mem_opnd(64, REG_CFP, RUBY_OFFSET_CFP_EP));
//let side_exit = get_side_exit(jit, ocb, ctx);
// Check for interrupts
add_comment(cb, "check for interrupts");
gen_check_ints(cb, counted_exit!(ocb, side_exit, leave_se_interrupt));
// Load the return value
mov(cb, REG0, ctx.stack_pop(1));
//gen_check_ints(cb, counted_exit!(ocb, side_exit, leave_se_interrupt));
*/
// Pop the current frame (ec->cfp++)
// Note: the return PC is already in the previous CFP
add_comment(cb, "pop stack frame");
add(cb, REG_CFP, uimm_opnd(RUBY_SIZEOF_CONTROL_FRAME as u64));
mov(cb, mem_opnd(64, REG_EC, RUBY_OFFSET_EC_CFP), REG_CFP);
asm.comment("pop stack frame");
let incr_cfp = asm.add(CFP, RUBY_SIZEOF_CONTROL_FRAME.into());
asm.mov(CFP, incr_cfp);
asm.mov(Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP), incr_cfp);
// Load the return value
let retval_opnd = ctx.stack_pop(1);
// Move the return value into the C return register for gen_leave_exit()
asm.mov(C_RET_OPND, retval_opnd);
// Reload REG_SP for the caller and write the return value.
// Top of the stack is REG_SP[0] since the caller has sp_offset=1.
mov(cb, REG_SP, mem_opnd(64, REG_CFP, RUBY_OFFSET_CFP_SP));
mov(cb, mem_opnd(64, REG_SP, 0), REG0);
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
asm.mov(Opnd::mem(64, SP, 0), C_RET_OPND);
// Jump to the JIT return address on the frame that was just popped
let offset_to_jit_return =
-(RUBY_SIZEOF_CONTROL_FRAME as i32) + (RUBY_OFFSET_CFP_JIT_RETURN as i32);
jmp_rm(cb, mem_opnd(64, REG_CFP, offset_to_jit_return));
asm.jmp_opnd(Opnd::mem(64, CFP, offset_to_jit_return));
EndBlock
}
/*
fn gen_getglobal(
jit: &mut JITState,
ctx: &mut Context,
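
gen_leave()'s pointer arithmetic, restated: ec->cfp++ moves CFP up by one control frame, and the jit_return slot is then addressed at a negative offset from the new CFP, i.e. on the frame that was just popped. A sketch with stand-in sizes:

const SIZEOF_CONTROL_FRAME: i64 = 0x40; // stand-in for RUBY_SIZEOF_CONTROL_FRAME
const OFFSET_CFP_JIT_RETURN: i64 = 0x38; // stand-in offset

fn main() {
    let cfp: i64 = 0x7000;

    // asm.add(CFP, RUBY_SIZEOF_CONTROL_FRAME.into()): pop the frame
    let new_cfp = cfp + SIZEOF_CONTROL_FRAME;

    // Opnd::mem(64, CFP, -SIZEOF_CONTROL_FRAME + OFFSET_CFP_JIT_RETURN)
    let jit_return_addr = new_cfp - SIZEOF_CONTROL_FRAME + OFFSET_CFP_JIT_RETURN;

    // the slot lives inside the frame that was just popped
    assert_eq!(jit_return_addr, cfp + OFFSET_CFP_JIT_RETURN);
}
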
@@ -5958,21 +5961,21 @@ fn get_gen_fn(opcode: VALUE) -> Option<InsnGenFn> {
match opcode {
YARVINSN_nop => Some(gen_nop),
//YARVINSN_pop => Some(gen_pop),
YARVINSN_pop => Some(gen_pop),
YARVINSN_dup => Some(gen_dup),
YARVINSN_dupn => Some(gen_dupn),
YARVINSN_swap => Some(gen_swap),
/*
YARVINSN_putnil => Some(gen_putnil),
YARVINSN_putobject => Some(gen_putobject),
YARVINSN_putobject_INT2FIX_0_ => Some(gen_putobject_int2fix),
YARVINSN_putobject_INT2FIX_1_ => Some(gen_putobject_int2fix),
YARVINSN_putself => Some(gen_putself),
YARVINSN_putspecialobject => Some(gen_putspecialobject),
YARVINSN_setn => Some(gen_setn),
YARVINSN_topn => Some(gen_topn),
YARVINSN_adjuststack => Some(gen_adjuststack),
//YARVINSN_putself => Some(gen_putself),
//YARVINSN_putspecialobject => Some(gen_putspecialobject),
//YARVINSN_setn => Some(gen_setn),
//YARVINSN_topn => Some(gen_topn),
//YARVINSN_adjuststack => Some(gen_adjuststack),
/*
YARVINSN_getlocal => Some(gen_getlocal),
YARVINSN_getlocal_WC_0 => Some(gen_getlocal_wc0),
YARVINSN_getlocal_WC_1 => Some(gen_getlocal_wc1),
@@ -6028,14 +6031,16 @@ fn get_gen_fn(opcode: VALUE) -> Option<InsnGenFn> {
YARVINSN_branchunless => Some(gen_branchunless),
YARVINSN_branchnil => Some(gen_branchnil),
YARVINSN_jump => Some(gen_jump),
*/
YARVINSN_getblockparamproxy => Some(gen_getblockparamproxy),
YARVINSN_getblockparam => Some(gen_getblockparam),
YARVINSN_opt_send_without_block => Some(gen_opt_send_without_block),
YARVINSN_send => Some(gen_send),
YARVINSN_invokesuper => Some(gen_invokesuper),
//YARVINSN_getblockparamproxy => Some(gen_getblockparamproxy),
//YARVINSN_getblockparam => Some(gen_getblockparam),
//YARVINSN_opt_send_without_block => Some(gen_opt_send_without_block),
//YARVINSN_send => Some(gen_send),
//YARVINSN_invokesuper => Some(gen_invokesuper),
YARVINSN_leave => Some(gen_leave),
/*
YARVINSN_getglobal => Some(gen_getglobal),
YARVINSN_setglobal => Some(gen_setglobal),
YARVINSN_anytostring => Some(gen_anytostring),
@@ -6176,10 +6181,10 @@ impl CodegenGlobals {
let leave_exit_code = gen_leave_exit(&mut ocb);
let stub_exit_code = gen_code_for_exit_from_stub(&mut ocb);
//let stub_exit_code = gen_code_for_exit_from_stub(&mut ocb);
// Generate full exit code for C func
let cfunc_exit_code = gen_full_cfunc_return(&mut ocb);
//let cfunc_exit_code = gen_full_cfunc_return(&mut ocb);
// Mark all code memory as executable
cb.mark_all_executable();
@@ -6189,8 +6194,8 @@ impl CodegenGlobals {
inline_cb: cb,
outlined_cb: ocb,
leave_exit_code: leave_exit_code,
stub_exit_code: stub_exit_code,
outline_full_cfunc_return_pos: cfunc_exit_code,
stub_exit_code: /*stub_exit_code*/CodePtr::from(1 as *mut u8),
outline_full_cfunc_return_pos: /*cfunc_exit_code*/CodePtr::from(1 as *mut u8),
global_inval_patches: Vec::new(),
inline_frozen_bytes: 0,
method_codegen_table: HashMap::new(),