Mirror of https://github.com/github/ruby.git
Use shape information in YJIT's definedivar implementation (#7579)
* Use shape information in YJIT's definedivar implementation

* Handle complex shape for definedivar
Parent: b738cb01b6
Commit: e950781880
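For context, `defined?(@ivar)` evaluates to the string "instance-variable" when the receiver currently carries that instance variable and to nil otherwise, and adding or removing an ivar moves the object to a different shape, which is the property the specialized code keys on. A minimal Ruby sketch of that behavior (illustrative only; the class and method names are made up and are not part of the commit):

class Point
  def x_defined? = defined?(@x)                  # the construct gen_definedivar compiles
  def set_x      = (@x = 1)                      # shape transition: adds @x
  def unset_x    = remove_instance_variable(:@x) # shape transition: removes @x
end

p = Point.new
p p.x_defined?   # => nil, @x has never been set on this object
p.set_x
p p.x_defined?   # => "instance-variable"
p.unset_x
p p.x_defined?   # => nil again once the ivar is removed

The first hunk below adds tests covering exactly these cases: non-heap receivers, receivers that change shape between calls, and receivers whose shape is too complex to specialize on.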
@@ -79,6 +79,53 @@ assert_equal '[nil, nil, nil, nil, nil, nil]', %q{
   end
 }
 
+assert_equal '[nil, nil, nil, nil, nil, nil]', %q{
+  # Tests defined? on non-heap objects
+  [NilClass, TrueClass, FalseClass, Integer, Float, Symbol].each do |klass|
+    klass.class_eval("def foo = defined?(@foo)")
+  end
+
+  [nil, true, false, 0xFABCAFE, 0.42, :cake].map do |instance|
+    instance.foo
+    instance.foo
+  end
+}
+
+assert_equal '[nil, "instance-variable", nil, "instance-variable"]', %q{
+  # defined? on object that changes shape between calls
+  class Foo
+    def foo
+      defined?(@foo)
+    end
+
+    def add
+      @foo = 1
+    end
+
+    def remove
+      self.remove_instance_variable(:@foo)
+    end
+  end
+
+  obj = Foo.new
+  [obj.foo, (obj.add; obj.foo), (obj.remove; obj.foo), (obj.add; obj.foo)]
+}
+
+assert_equal '["instance-variable", 5]', %q{
+  # defined? on object too complex for shape information
+  class Foo
+    def initialize
+      100.times { |i| instance_variable_set("@foo#{i}", i) }
+    end
+
+    def foo
+      [defined?(@foo5), @foo5]
+    end
+  end
+
+  Foo.new.foo
+}
+
 assert_equal '0', %q{
   # This is a regression test for incomplete invalidation from
   # opt_setinlinecache. This test might be brittle, so
@@ -2450,37 +2450,84 @@ fn gen_definedivar(
     jit: &mut JITState,
     ctx: &mut Context,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> CodegenStatus {
+    // Defer compilation so we can specialize base on a runtime receiver
+    if !jit.at_current_insn() {
+        defer_compilation(jit, ctx, asm, ocb);
+        return EndBlock;
+    }
+
     let ivar_name = jit.get_arg(0).as_u64();
+    // Value that will be pushed on the stack if the ivar is defined. In practice this is always the
+    // string "instance-variable". If the ivar is not defined, nil will be pushed instead.
     let pushval = jit.get_arg(2);
 
     // Get the receiver
     let recv = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
 
-    // Save the PC and SP because the callee may allocate
-    // Note that this modifies REG_SP, which is why we do it first
-    jit_prepare_routine_call(jit, ctx, asm);
+    // Specialize base on compile time values
+    let comptime_receiver = jit.peek_at_self();
 
-    // Call rb_ivar_defined(recv, ivar_name)
-    let def_result = asm.ccall(rb_ivar_defined as *const u8, vec![recv.into(), ivar_name.into()]);
+    if comptime_receiver.shape_too_complex() {
+        // Fall back to calling rb_ivar_defined
 
-    // if (rb_ivar_defined(recv, ivar_name)) {
-    //  val = pushval;
-    // }
-    asm.test(def_result, Opnd::UImm(255));
-    let out_value = asm.csel_nz(pushval.into(), Qnil.into());
+        // Save the PC and SP because the callee may allocate
+        // Note that this modifies REG_SP, which is why we do it first
+        jit_prepare_routine_call(jit, ctx, asm);
 
-    // Push the return value onto the stack
-    let out_type = if pushval.special_const_p() {
-        Type::UnknownImm
-    } else {
-        Type::Unknown
-    };
-    let stack_ret = ctx.stack_push(out_type);
-    asm.mov(stack_ret, out_value);
-
-    KeepCompiling
+        // Call rb_ivar_defined(recv, ivar_name)
+        let def_result = asm.ccall(rb_ivar_defined as *const u8, vec![recv.into(), ivar_name.into()]);
+
+        // if (rb_ivar_defined(recv, ivar_name)) {
+        //  val = pushval;
+        // }
+        asm.test(def_result, Opnd::UImm(255));
+        let out_value = asm.csel_nz(pushval.into(), Qnil.into());
+
+        // Push the return value onto the stack
+        let out_type = if pushval.special_const_p() { Type::UnknownImm } else { Type::Unknown };
+        let stack_ret = ctx.stack_push(out_type);
+        asm.mov(stack_ret, out_value);
+
+        return KeepCompiling
+    }
+
+    let shape_id = comptime_receiver.shape_id_of();
+    let ivar_exists = unsafe {
+        let shape = rb_shape_get_shape_by_id(shape_id);
+        let mut ivar_index: u32 = 0;
+        rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index)
+    };
+
+    let side_exit = get_side_exit(jit, ocb, ctx);
+
+    // Guard heap object (recv_opnd must be used before stack_pop)
+    guard_object_is_heap(ctx, asm, recv, SelfOpnd, side_exit);
+
+    let shape_id_offset = unsafe { rb_shape_id_offset() };
+    let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
+
+    asm.comment("guard shape");
+    asm.cmp(shape_opnd, Opnd::UImm(shape_id as u64));
+    let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
+    jit_chain_guard(
+        JCC_JNE,
+        jit,
+        ctx,
+        asm,
+        ocb,
+        GET_IVAR_MAX_DEPTH,
+        megamorphic_side_exit,
+    );
+
+    let result = if ivar_exists { pushval } else { Qnil };
+    jit_putobject(jit, ctx, asm, result);
+
+    // Jump to next instruction. This allows guard chains to share the same successor.
+    jump_to_next_insn(jit, ctx, asm, ocb);
+
+    return EndBlock;
 }
 
 fn gen_checktype(
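As a usage note (not part of the commit): the generated code guards on the receiver's shape, so a receiver that changes shape after compilation takes a side exit and, up to the chain-guard depth, gets a new specialized block, while receivers whose shape is marked too complex skip the specialization entirely and call rb_ivar_defined at runtime. A hedged Ruby sketch of exercising the specialized path under YJIT (the file name and class are illustrative; --yjit-call-threshold is available in recent CRuby releases):

# Run with YJIT enabled so `check` gets compiled after its first call, e.g.:
#   ruby --yjit --yjit-call-threshold=1 probe.rb
class Probe
  def check = defined?(@flag)
end

obj = Probe.new
results = []
3.times { results << obj.check }          # compiled against the shape without @flag
obj.instance_variable_set(:@flag, true)   # shape transition on the same object
3.times { results << obj.check }          # shape guard fails; YJIT side-exits or chains a new block
p results
# => [nil, nil, nil, "instance-variable", "instance-variable", "instance-variable"]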