YJIT: Optimize local variables when EP == BP (take 2) (#10607)

* Revert "Revert "YJIT: Optimize local variables when EP == BP" (#10584)"

This reverts commit c878344195.

* YJIT: Take care of GC references in ISEQ invariants

Co-authored-by: Alan Wu <alansi.xingwu@shopify.com>

---------

Co-authored-by: Alan Wu <alansi.xingwu@shopify.com>
Takashi Kokubun authored 2024-04-25 07:04:53 -07:00, committed by GitHub
Parent: f248e1008a
Commit: 7ab1a608e7
No known key found for this signature
GPG key ID: B5690EEEBB952194
11 changed files with 222 additions and 32 deletions

bootstraptest/test_yjit.rb

@@ -2317,6 +2317,19 @@ assert_equal '123', %q{
   foo(Foo)
 }
 
+# Test EP == BP invalidation with moving ISEQs
+assert_equal 'ok', %q{
+  def entry
+    ok = proc { :ok } # set #entry as an EP-escaping ISEQ
+    [nil].reverse_each do # avoid exiting the JIT frame on the constant
+      GC.compact # move #entry ISEQ
+    end
+    ok # should be read off of escaped EP
+  end
+
+  entry.call
+}
+
 # invokesuper edge case
 assert_equal '[:A, [:A, :B]]', %q{
   class B
iseq.c

@@ -167,7 +167,7 @@ rb_iseq_free(const rb_iseq_t *iseq)
     struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
     rb_rjit_free_iseq(iseq); /* Notify RJIT */
 #if USE_YJIT
-    rb_yjit_iseq_free(body->yjit_payload);
+    rb_yjit_iseq_free(iseq);
     if (FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED)) {
         RUBY_ASSERT(rb_yjit_live_iseq_count > 0);
         rb_yjit_live_iseq_count--;
@@ -377,7 +377,7 @@ rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating)
         rb_rjit_iseq_update_references(body);
 #endif
 #if USE_YJIT
-        rb_yjit_iseq_update_references(body->yjit_payload);
+        rb_yjit_iseq_update_references(iseq);
 #endif
     }
     else {

vm.c

@@ -1007,6 +1007,11 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
     }
 #endif
 
+    // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
+    if (env->iseq) {
+        rb_yjit_invalidate_ep_is_bp(env->iseq);
+    }
+
     return (VALUE)env;
 }
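For intuition, here is a minimal stand-alone Rust model of what this hook reacts to: vm_make_env_each() runs when a frame's environment is materialized on the heap (a Binding is created, a block is captured as a Proc, and so on), which repoints cfp->ep away from the stack. The Frame and Ep names below are stand-ins invented for this sketch, not VM types:

enum Ep {
    OnStack,          // EP still points into the VM stack (EP == BP)
    OnHeap(Vec<i64>), // heap-allocated copy of the env after materialization
}

struct Frame {
    stack: Vec<i64>, // stand-in for locals + env header on the VM stack
    ep: Ep,
}

impl Frame {
    /// vm_make_env_each, in miniature: move the env to the heap once.
    fn make_env(&mut self) {
        if self.ep_is_bp() {
            self.ep = Ep::OnHeap(self.stack.clone());
            // This is the point where the real VM now calls
            // rb_yjit_invalidate_ep_is_bp(env->iseq).
        }
    }

    fn ep_is_bp(&self) -> bool {
        matches!(self.ep, Ep::OnStack)
    }
}

fn main() {
    let mut frame = Frame { stack: vec![1, 2, 3], ep: Ep::OnStack };
    assert!(frame.ep_is_bp());  // fresh frame: SP-relative local access is valid
    frame.make_env();           // e.g. the frame was captured by a Proc or Binding
    assert!(!frame.ep_is_bp()); // blocks compiled assuming EP == BP must be invalidated
    if let Ep::OnHeap(env) = &frame.ep {
        assert_eq!(env, &vec![1, 2, 3]); // the locals survive on the heap
    }
}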

yjit.c

@@ -629,6 +629,12 @@ rb_get_iseq_body_stack_max(const rb_iseq_t *iseq)
     return iseq->body->stack_max;
 }
 
+enum rb_iseq_type
+rb_get_iseq_body_type(const rb_iseq_t *iseq)
+{
+    return iseq->body->type;
+}
+
 bool
 rb_get_iseq_flags_has_lead(const rb_iseq_t *iseq)
 {

yjit.h

@@ -40,14 +40,15 @@ void rb_yjit_init(bool yjit_enabled);
 void rb_yjit_bop_redefined(int redefined_flag, enum ruby_basic_operators bop);
 void rb_yjit_constant_state_changed(ID id);
 void rb_yjit_iseq_mark(void *payload);
-void rb_yjit_iseq_update_references(void *payload);
-void rb_yjit_iseq_free(void *payload);
+void rb_yjit_iseq_update_references(const rb_iseq_t *iseq);
+void rb_yjit_iseq_free(const rb_iseq_t *iseq);
 void rb_yjit_before_ractor_spawn(void);
 void rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic, unsigned insn_idx);
 void rb_yjit_tracing_invalidate_all(void);
 void rb_yjit_show_usage(int help, int highlight, unsigned int width, int columns);
 void rb_yjit_lazy_push_frame(const VALUE *pc);
 void rb_yjit_invalidate_no_singleton_class(VALUE klass);
+void rb_yjit_invalidate_ep_is_bp(const rb_iseq_t *iseq);
 
 #else
 // !USE_YJIT
@@ -64,13 +65,14 @@ static inline void rb_yjit_init(bool yjit_enabled) {}
 static inline void rb_yjit_bop_redefined(int redefined_flag, enum ruby_basic_operators bop) {}
 static inline void rb_yjit_constant_state_changed(ID id) {}
 static inline void rb_yjit_iseq_mark(void *payload) {}
-static inline void rb_yjit_iseq_update_references(void *payload) {}
-static inline void rb_yjit_iseq_free(void *payload) {}
+static inline void rb_yjit_iseq_update_references(const rb_iseq_t *iseq) {}
+static inline void rb_yjit_iseq_free(const rb_iseq_t *iseq) {}
 static inline void rb_yjit_before_ractor_spawn(void) {}
 static inline void rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic, unsigned insn_idx) {}
 static inline void rb_yjit_tracing_invalidate_all(void) {}
 static inline void rb_yjit_lazy_push_frame(const VALUE *pc) {}
 static inline void rb_yjit_invalidate_no_singleton_class(VALUE klass) {}
+static inline void rb_yjit_invalidate_ep_is_bp(const rb_iseq_t *iseq) {}
 
 #endif // #if USE_YJIT

yjit/bindgen/src/main.rs

@@ -299,6 +299,7 @@ fn main() {
         .allowlist_type("ruby_tag_type")
         .allowlist_type("ruby_vm_throw_flags")
         .allowlist_type("vm_check_match_type")
+        .allowlist_type("rb_iseq_type")
 
         // From yjit.c
         .allowlist_function("rb_iseq_(get|set)_yjit_payload")
@@ -416,6 +417,7 @@ fn main() {
         .allowlist_function("rb_get_iseq_body_parent_iseq")
         .allowlist_function("rb_get_iseq_body_iseq_encoded")
         .allowlist_function("rb_get_iseq_body_stack_max")
+        .allowlist_function("rb_get_iseq_body_type")
         .allowlist_function("rb_get_iseq_flags_has_lead")
         .allowlist_function("rb_get_iseq_flags_has_opt")
         .allowlist_function("rb_get_iseq_flags_has_kw")

yjit/src/codegen.rs

@@ -46,7 +46,7 @@ type InsnGenFn = fn(
 /// Represents a [core::Block] while we build it.
 pub struct JITState {
     /// Instruction sequence for the compiling block
-    iseq: IseqPtr,
+    pub iseq: IseqPtr,
 
     /// The iseq index of the first instruction in the block
     starting_insn_idx: IseqIdx,
@@ -101,6 +101,9 @@ pub struct JITState {
     /// A list of classes that are not supposed to have a singleton class.
     pub no_singleton_class_assumptions: Vec<VALUE>,
 
+    /// When true, the block is valid only when base pointer is equal to environment pointer.
+    pub no_ep_escape: bool,
+
     /// When true, the block is valid only when there is a total of one ractor running
     pub block_assumes_single_ractor: bool,
@@ -130,6 +133,7 @@ impl JITState {
             bop_assumptions: vec![],
             stable_constant_names_assumption: None,
             no_singleton_class_assumptions: vec![],
+            no_ep_escape: false,
             block_assumes_single_ractor: false,
             perf_map: Rc::default(),
             perf_stack: vec![],
@@ -171,6 +175,23 @@ impl JITState {
         unsafe { *(self.pc.offset(arg_idx + 1)) }
     }
 
+    /// Return true if the current ISEQ could escape an environment.
+    ///
+    /// As of vm_push_frame(), EP is always equal to BP. However, after pushing
+    /// a frame, some ISEQ setups call vm_bind_update_env(), which redirects EP.
+    /// Also, some method calls escape the environment to the heap.
+    fn escapes_ep(&self) -> bool {
+        match unsafe { get_iseq_body_type(self.iseq) } {
+            // <main> frame is always associated to TOPLEVEL_BINDING.
+            ISEQ_TYPE_MAIN |
+            // Kernel#eval uses a heap EP when a Binding argument is not nil.
+            ISEQ_TYPE_EVAL => true,
+            // If this ISEQ has previously escaped EP, give up the optimization.
+            _ if iseq_escapes_ep(self.iseq) => true,
+            _ => false,
+        }
+    }
+
     // Get the index of the next instruction
     fn next_insn_idx(&self) -> u16 {
         self.insn_idx + insn_len(self.get_opcode()) as u16
@@ -250,6 +271,19 @@ impl JITState {
         true
     }
 
+    /// Assume that base pointer is equal to environment pointer in the current ISEQ.
+    /// Return true if it's safe to assume so.
+    fn assume_no_ep_escape(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb) -> bool {
+        if jit_ensure_block_entry_exit(self, asm, ocb).is_none() {
+            return false; // out of space, give up
+        }
+        if self.escapes_ep() {
+            return false; // EP has been escaped in this ISEQ. Disable the optimization to avoid an invalidation loop.
+        }
+        self.no_ep_escape = true;
+        true
+    }
+
     fn get_cfp(&self) -> *mut rb_control_frame_struct {
         unsafe { get_ec_cfp(self.ec) }
     }
@@ -2203,16 +2237,22 @@ fn gen_get_lep(jit: &JITState, asm: &mut Assembler) -> Opnd {
 fn gen_getlocal_generic(
     jit: &mut JITState,
     asm: &mut Assembler,
+    ocb: &mut OutlinedCb,
     ep_offset: u32,
     level: u32,
 ) -> Option<CodegenStatus> {
-    // Load environment pointer EP (level 0) from CFP
-    let ep_opnd = gen_get_ep(asm, level);
+    let local_opnd = if level == 0 && jit.assume_no_ep_escape(asm, ocb) {
+        // Load the local using SP register
+        asm.ctx.ep_opnd(-(ep_offset as i32))
+    } else {
+        // Load environment pointer EP (level 0) from CFP
+        let ep_opnd = gen_get_ep(asm, level);
 
-    // Load the local from the block
-    // val = *(vm_get_ep(GET_EP(), level) - idx);
-    let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
-    let local_opnd = Opnd::mem(64, ep_opnd, offs);
+        // Load the local from the block
+        // val = *(vm_get_ep(GET_EP(), level) - idx);
+        let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
+        Opnd::mem(64, ep_opnd, offs)
+    };
 
     // Write the local at SP
     let stack_top = if level == 0 {
@@ -2230,29 +2270,29 @@ fn gen_getlocal_generic(
 fn gen_getlocal(
     jit: &mut JITState,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
     let idx = jit.get_arg(0).as_u32();
     let level = jit.get_arg(1).as_u32();
-    gen_getlocal_generic(jit, asm, idx, level)
+    gen_getlocal_generic(jit, asm, ocb, idx, level)
 }
 
 fn gen_getlocal_wc0(
     jit: &mut JITState,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
     let idx = jit.get_arg(0).as_u32();
-    gen_getlocal_generic(jit, asm, idx, 0)
+    gen_getlocal_generic(jit, asm, ocb, idx, 0)
 }
 
 fn gen_getlocal_wc1(
    jit: &mut JITState,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
     let idx = jit.get_arg(0).as_u32();
-    gen_getlocal_generic(jit, asm, idx, 1)
+    gen_getlocal_generic(jit, asm, ocb, idx, 1)
 }
 
 fn gen_setlocal_generic(
@@ -2264,11 +2304,11 @@ fn gen_setlocal_generic(
 ) -> Option<CodegenStatus> {
     let value_type = asm.ctx.get_opnd_type(StackOpnd(0));
 
-    // Load environment pointer EP at level
-    let ep_opnd = gen_get_ep(asm, level);
-
     // Fallback because of write barrier
     if asm.ctx.get_chain_depth() > 0 {
+        // Load environment pointer EP at level
+        let ep_opnd = gen_get_ep(asm, level);
+
         // This function should not yield to the GC.
         // void rb_vm_env_write(const VALUE *ep, int index, VALUE v)
         let index = -(ep_offset as i64);
@@ -2286,16 +2326,27 @@ fn gen_setlocal_generic(
         return Some(KeepCompiling);
     }
 
-    // Write barriers may be required when VM_ENV_FLAG_WB_REQUIRED is set, however write barriers
-    // only affect heap objects being written. If we know an immediate value is being written we
-    // can skip this check.
-    if !value_type.is_imm() {
-        // flags & VM_ENV_FLAG_WB_REQUIRED
+    let (flags_opnd, local_opnd) = if level == 0 && jit.assume_no_ep_escape(asm, ocb) {
+        // Load flags and the local using SP register
+        let local_opnd = asm.ctx.ep_opnd(-(ep_offset as i32));
+        let flags_opnd = asm.ctx.ep_opnd(VM_ENV_DATA_INDEX_FLAGS as i32);
+        (flags_opnd, local_opnd)
+    } else {
+        // Load flags and the local for the level
+        let ep_opnd = gen_get_ep(asm, level);
         let flags_opnd = Opnd::mem(
             64,
             ep_opnd,
             SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_FLAGS as i32,
         );
+        (flags_opnd, Opnd::mem(64, ep_opnd, -SIZEOF_VALUE_I32 * ep_offset as i32))
+    };
+
+    // Write barriers may be required when VM_ENV_FLAG_WB_REQUIRED is set, however write barriers
+    // only affect heap objects being written. If we know an immediate value is being written we
+    // can skip this check.
+    if !value_type.is_imm() {
+        // flags & VM_ENV_FLAG_WB_REQUIRED
         asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
 
         // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
@@ -2319,8 +2370,7 @@ fn gen_setlocal_generic(
     let stack_top = asm.stack_pop(1);
 
     // Write the value at the environment pointer
-    let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
-    asm.mov(Opnd::mem(64, ep_opnd, offs), stack_top);
+    asm.mov(local_opnd, stack_top);
 
     Some(KeepCompiling)
 }
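The gate that picks between the two addressing modes can be summarized with a small stand-alone sketch (IseqType and these free functions are simplifications invented for illustration; the real logic lives in JITState::escapes_ep and JITState::assume_no_ep_escape above):

#[derive(Clone, Copy)]
enum IseqType { Method, Block, Eval, Main }

fn escapes_ep(iseq_type: IseqType, escaped_before: bool) -> bool {
    match iseq_type {
        // <main> is permanently bound to TOPLEVEL_BINDING, and eval frames
        // may run with a heap EP, so never assume EP == BP for them.
        IseqType::Main | IseqType::Eval => true,
        // Otherwise, only trust the ISEQ if it has never escaped its EP;
        // re-assuming after an escape would cause an invalidation loop.
        _ => escaped_before,
    }
}

/// True when a local can be addressed through SP instead of loading EP.
fn can_use_sp_relative_local(level: u32, iseq_type: IseqType, escaped_before: bool) -> bool {
    level == 0 && !escapes_ep(iseq_type, escaped_before)
}

fn main() {
    assert!(can_use_sp_relative_local(0, IseqType::Method, false));
    assert!(can_use_sp_relative_local(0, IseqType::Block, false));
    assert!(!can_use_sp_relative_local(1, IseqType::Method, false)); // outer scope: load EP
    assert!(!can_use_sp_relative_local(0, IseqType::Eval, false));   // heap EP possible
    assert!(!can_use_sp_relative_local(0, IseqType::Method, true));  // escaped before: give up
}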

yjit/src/core.rs

@@ -1138,8 +1138,12 @@ pub fn for_each_off_stack_iseq_payload<F: FnMut(&mut IseqPayload)>(mut callback:
 /// Free the per-iseq payload
 #[no_mangle]
-pub extern "C" fn rb_yjit_iseq_free(payload: *mut c_void) {
+pub extern "C" fn rb_yjit_iseq_free(iseq: IseqPtr) {
+    // Free invariants for the ISEQ
+    iseq_free_invariants(iseq);
+
     let payload = {
+        let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
         if payload.is_null() {
             // Nothing to free.
             return;
@@ -1266,7 +1270,11 @@ pub extern "C" fn rb_yjit_iseq_mark(payload: *mut c_void) {
 /// GC callback for updating GC objects in the per-iseq payload.
 /// This is a mirror of [rb_yjit_iseq_mark].
 #[no_mangle]
-pub extern "C" fn rb_yjit_iseq_update_references(payload: *mut c_void) {
+pub extern "C" fn rb_yjit_iseq_update_references(iseq: IseqPtr) {
+    // Update ISEQ references in invariants
+    iseq_update_references_in_invariants(iseq);
+
+    let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
     let payload = if payload.is_null() {
         // Nothing to update.
         return;
@@ -1657,6 +1665,9 @@ impl JITState {
         for klass in self.no_singleton_class_assumptions {
             track_no_singleton_class_assumption(blockref, klass);
         }
+        if self.no_ep_escape {
+            track_no_ep_escape_assumption(blockref, self.iseq);
+        }
 
         blockref
     }
@@ -1798,6 +1809,13 @@ impl Context {
         return Opnd::mem(64, SP, offset);
     }
 
+    /// Get an operand for the adjusted environment pointer address using SP register.
+    /// This is valid only when a Binding object hasn't been created for the frame.
+    pub fn ep_opnd(&self, offset: i32) -> Opnd {
+        let ep_offset = self.get_stack_size() as i32 + 1;
+        self.sp_opnd(-ep_offset + offset)
+    }
+
     /// Stop using a register for a given stack temp.
     /// This allows us to reuse the register for a value that we know is dead
     /// and will no longer be used (e.g. popped stack temp).
@@ -3130,6 +3148,12 @@ pub fn defer_compilation(
     // Likely a stub due to the increased chain depth
     let target0_address = branch.set_target(0, blockid, &next_ctx, ocb);
 
+    // Pad the block if it has the potential to be invalidated. This must be
+    // done before gen_fn() in case the jump is overwritten by a fallthrough.
+    if jit.block_entry_exit.is_some() {
+        asm.pad_inval_patch();
+    }
+
     // Call the branch generation function
     asm_comment!(asm, "defer_compilation");
     asm.mark_branch_start(&branch);
@@ -3313,9 +3337,10 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
         assert!(
             cb.get_write_ptr() <= block_end,
-            "invalidation wrote past end of block (code_size: {:?}, new_size: {})",
+            "invalidation wrote past end of block (code_size: {:?}, new_size: {}, start_addr: {:?})",
             block.code_size(),
             cb.get_write_ptr().as_offset() - block_start.as_offset(),
+            block.start_addr.raw_ptr(cb),
         );
         cb.set_write_ptr(cur_pos);
         cb.set_dropped_bytes(cur_dropped_bytes);
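Why Context::ep_opnd can compute the EP address from SP alone: when EP == BP, the environment header sits exactly stack_size + 1 slots below SP, so a local at ep_offset is reachable without loading cfp->ep from memory. A stand-alone sketch of the arithmetic (the addresses and helper names here are hypothetical, invented for this sketch):

// Frame layout with EP == BP, addresses growing upward:
//   [locals ...][env header][temp stack ...] <- SP
//                ^ EP points at the last env-header slot,
//                  (stack_size + 1) slots below SP.

const SIZEOF_VALUE: i64 = 8;

/// EP-relative address of a local: *(EP - ep_offset), as the interpreter does.
fn local_via_ep(ep: i64, ep_offset: i64) -> i64 {
    ep - SIZEOF_VALUE * ep_offset
}

/// SP-relative address of the same local, mirroring ctx.ep_opnd(-ep_offset):
/// valid only while EP == BP still holds for the frame.
fn local_via_sp(sp: i64, stack_size: i64, ep_offset: i64) -> i64 {
    sp - SIZEOF_VALUE * (stack_size + 1 + ep_offset)
}

fn main() {
    let ep = 0x1000;    // hypothetical EP address
    let stack_size = 2; // two temps currently pushed above the env header
    let sp = ep + SIZEOF_VALUE * (stack_size + 1); // SP sits above EP and the temps

    for ep_offset in 1..=4 {
        assert_eq!(local_via_ep(ep, ep_offset), local_via_sp(sp, stack_size, ep_offset));
    }
    println!("EP-relative and SP-relative addressing agree while EP == BP");
}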

yjit/src/cruby.rs

@@ -170,6 +170,7 @@ pub use rb_iseq_encoded_size as get_iseq_encoded_size;
 pub use rb_get_iseq_body_local_iseq as get_iseq_body_local_iseq;
 pub use rb_get_iseq_body_iseq_encoded as get_iseq_body_iseq_encoded;
 pub use rb_get_iseq_body_stack_max as get_iseq_body_stack_max;
+pub use rb_get_iseq_body_type as get_iseq_body_type;
 pub use rb_get_iseq_flags_has_lead as get_iseq_flags_has_lead;
 pub use rb_get_iseq_flags_has_opt as get_iseq_flags_has_opt;
 pub use rb_get_iseq_flags_has_kw as get_iseq_flags_has_kw;

yjit/src/cruby_bindings.inc.rs

@@ -478,6 +478,16 @@ pub struct iseq_inline_iv_cache_entry {
 pub struct iseq_inline_cvar_cache_entry {
     pub entry: *mut rb_cvar_class_tbl_entry,
 }
+pub const ISEQ_TYPE_TOP: rb_iseq_type = 0;
+pub const ISEQ_TYPE_METHOD: rb_iseq_type = 1;
+pub const ISEQ_TYPE_BLOCK: rb_iseq_type = 2;
+pub const ISEQ_TYPE_CLASS: rb_iseq_type = 3;
+pub const ISEQ_TYPE_RESCUE: rb_iseq_type = 4;
+pub const ISEQ_TYPE_ENSURE: rb_iseq_type = 5;
+pub const ISEQ_TYPE_EVAL: rb_iseq_type = 6;
+pub const ISEQ_TYPE_MAIN: rb_iseq_type = 7;
+pub const ISEQ_TYPE_PLAIN: rb_iseq_type = 8;
+pub type rb_iseq_type = u32;
 pub const BUILTIN_ATTR_LEAF: rb_builtin_attr = 1;
 pub const BUILTIN_ATTR_SINGLE_NOARG_LEAF: rb_builtin_attr = 2;
 pub const BUILTIN_ATTR_INLINE_BLOCK: rb_builtin_attr = 4;
@@ -1154,6 +1164,7 @@ extern "C" {
     pub fn rb_get_iseq_body_local_table_size(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
     pub fn rb_get_iseq_body_iseq_encoded(iseq: *const rb_iseq_t) -> *mut VALUE;
     pub fn rb_get_iseq_body_stack_max(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+    pub fn rb_get_iseq_body_type(iseq: *const rb_iseq_t) -> rb_iseq_type;
     pub fn rb_get_iseq_flags_has_lead(iseq: *const rb_iseq_t) -> bool;
     pub fn rb_get_iseq_flags_has_opt(iseq: *const rb_iseq_t) -> bool;
     pub fn rb_get_iseq_flags_has_kw(iseq: *const rb_iseq_t) -> bool;

yjit/src/invariants.rs

@@ -59,6 +59,11 @@ pub struct Invariants {
     /// there has been a singleton class for the class after boot, so you cannot
     /// assume no singleton class going forward.
     no_singleton_classes: HashMap<VALUE, HashSet<BlockRef>>,
+
+    /// A map from an ISEQ to a set of blocks that assume base pointer is equal
+    /// to environment pointer. When the set is empty, it means that EP has been
+    /// escaped in the ISEQ.
+    no_ep_escape_iseqs: HashMap<IseqPtr, HashSet<BlockRef>>,
 }
 
 /// Private singleton instance of the invariants global struct.
@@ -76,6 +81,7 @@ impl Invariants {
                 constant_state_blocks: HashMap::new(),
                 block_constant_states: HashMap::new(),
                 no_singleton_classes: HashMap::new(),
+                no_ep_escape_iseqs: HashMap::new(),
             });
         }
     }
@@ -154,6 +160,43 @@ pub fn has_singleton_class_of(klass: VALUE) -> bool {
         .map_or(false, |blocks| blocks.is_empty())
 }
 
+/// Track that a block will assume that base pointer is equal to environment pointer.
+pub fn track_no_ep_escape_assumption(uninit_block: BlockRef, iseq: IseqPtr) {
+    Invariants::get_instance()
+        .no_ep_escape_iseqs
+        .entry(iseq)
+        .or_default()
+        .insert(uninit_block);
+}
+
+/// Returns true if a given ISEQ has previously escaped an environment.
+pub fn iseq_escapes_ep(iseq: IseqPtr) -> bool {
+    Invariants::get_instance()
+        .no_ep_escape_iseqs
+        .get(&iseq)
+        .map_or(false, |blocks| blocks.is_empty())
+}
+
+/// Update ISEQ references in invariants on GC compaction
+pub fn iseq_update_references_in_invariants(iseq: IseqPtr) {
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+    let no_ep_escape_iseqs = &mut Invariants::get_instance().no_ep_escape_iseqs;
+    if let Some(blocks) = no_ep_escape_iseqs.remove(&iseq) {
+        let new_iseq = unsafe { rb_gc_location(iseq.into()) }.as_iseq();
+        no_ep_escape_iseqs.insert(new_iseq, blocks);
+    }
+}
+
+/// Forget an ISEQ remembered in invariants
+pub fn iseq_free_invariants(iseq: IseqPtr) {
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+    Invariants::get_instance().no_ep_escape_iseqs.remove(&iseq);
+}
+
 // Checks rb_method_basic_definition_p and registers the current block for invalidation if method
 // lookup changes.
 // A "basic method" is one defined during VM boot, so we can use this to check assumptions based on
@@ -420,6 +463,10 @@ pub fn block_assumptions_free(blockref: BlockRef) {
     for (_, blocks) in invariants.no_singleton_classes.iter_mut() {
         blocks.remove(&blockref);
     }
+    // Remove tracking for blocks assuming EP doesn't escape
+    for (_, blocks) in invariants.no_ep_escape_iseqs.iter_mut() {
+        blocks.remove(&blockref);
+    }
 }
 
 /// Callback from the opt_setinlinecache instruction in the interpreter.
@@ -515,6 +562,34 @@ pub extern "C" fn rb_yjit_invalidate_no_singleton_class(klass: VALUE) {
     }
 }
 
+/// Invalidate blocks for a given ISEQ that assume environment pointer is
+/// equal to base pointer.
+#[no_mangle]
+pub extern "C" fn rb_yjit_invalidate_ep_is_bp(iseq: IseqPtr) {
+    // Skip tracking EP escapes on boot. We don't need to invalidate anything during boot.
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+
+    // If an EP escape for this ISEQ is detected for the first time, invalidate all blocks
+    // associated to the ISEQ.
+    let no_ep_escape_iseqs = &mut Invariants::get_instance().no_ep_escape_iseqs;
+    match no_ep_escape_iseqs.get_mut(&iseq) {
+        Some(blocks) => {
+            // Invalidate the existing blocks; when they are compiled again,
+            // iseq_escapes_ep() will return true and the optimization is skipped.
+            for block in mem::take(blocks) {
+                invalidate_block_version(&block);
+                incr_counter!(invalidate_no_singleton_class);
+            }
+        }
+        None => {
+            // Leave an empty set so iseq_escapes_ep() returns true for this ISEQ from now on.
+            no_ep_escape_iseqs.insert(iseq, HashSet::new());
+        }
+    }
+}
+
 // Invalidate all generated code and patch C method return code to contain
 // logic for firing the c_return TracePoint event. Once rb_vm_barrier()
 // returns, all other ractors are pausing inside RB_VM_LOCK_ENTER(), which
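The bookkeeping above condenses into a self-contained model (EpInvariants, Iseq, and Block are stand-ins invented for this sketch): an entry's set holds the blocks to invalidate, and an empty set doubles as a tombstone meaning the ISEQ has already escaped its EP, so later compilations skip the optimization instead of looping through invalidations.

use std::collections::{HashMap, HashSet};

type Iseq = usize;  // stand-in for IseqPtr
type Block = usize; // stand-in for BlockRef

#[derive(Default)]
struct EpInvariants {
    no_ep_escape_iseqs: HashMap<Iseq, HashSet<Block>>,
}

impl EpInvariants {
    /// track_no_ep_escape_assumption: a compiled block bets on EP == BP.
    fn track(&mut self, iseq: Iseq, block: Block) {
        self.no_ep_escape_iseqs.entry(iseq).or_default().insert(block);
    }

    /// iseq_escapes_ep: true only once the set exists *and* is empty.
    fn escapes_ep(&self, iseq: Iseq) -> bool {
        self.no_ep_escape_iseqs.get(&iseq).map_or(false, |b| b.is_empty())
    }

    /// rb_yjit_invalidate_ep_is_bp: the first escape drains every tracked
    /// block for invalidation and leaves an empty set behind.
    fn invalidate(&mut self, iseq: Iseq) -> Vec<Block> {
        match self.no_ep_escape_iseqs.get_mut(&iseq) {
            Some(blocks) => std::mem::take(blocks).into_iter().collect(),
            None => {
                self.no_ep_escape_iseqs.insert(iseq, HashSet::new());
                Vec::new()
            }
        }
    }
}

fn main() {
    let mut inv = EpInvariants::default();
    inv.track(1, 100);
    assert!(!inv.escapes_ep(1));
    let invalidated = inv.invalidate(1); // env materialized for iseq 1
    assert_eq!(invalidated, vec![100]);  // this block must be thrown away
    assert!(inv.escapes_ep(1));          // empty set == "gave up on this ISEQ"
}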