From 7ab1a608e7413cdb0f93243eb3e6e20a32cec44e Mon Sep 17 00:00:00 2001
From: Takashi Kokubun
Date: Thu, 25 Apr 2024 07:04:53 -0700
Subject: [PATCH] YJIT: Optimize local variables when EP == BP (take 2)
 (#10607)

* Revert "Revert "YJIT: Optimize local variables when EP == BP" (#10584)"

This reverts commit c8783441952217c18e523749c821f82cd7e5d222.

* YJIT: Take care of GC references in ISEQ invariants

Co-authored-by: Alan Wu

---------

Co-authored-by: Alan Wu
---
 bootstraptest/test_yjit.rb     | 13 +++++
 iseq.c                         |  4 +-
 vm.c                           |  5 ++
 yjit.c                         |  6 +++
 yjit.h                         | 10 ++--
 yjit/bindgen/src/main.rs       |  2 +
 yjit/src/codegen.rs            | 96 ++++++++++++++++++++++++++--------
 yjit/src/core.rs               | 31 +++++++++--
 yjit/src/cruby.rs              |  1 +
 yjit/src/cruby_bindings.inc.rs | 11 ++++
 yjit/src/invariants.rs         | 75 ++++++++++++++++++++++++++
 11 files changed, 222 insertions(+), 32 deletions(-)

diff --git a/bootstraptest/test_yjit.rb b/bootstraptest/test_yjit.rb
index 216594f6cc..ae67c91a76 100644
--- a/bootstraptest/test_yjit.rb
+++ b/bootstraptest/test_yjit.rb
@@ -2317,6 +2317,19 @@ assert_equal '123', %q{
   foo(Foo)
 }

+# Test EP == BP invalidation with moving ISEQs
+assert_equal 'ok', %q{
+  def entry
+    ok = proc { :ok } # set #entry as an EP-escaping ISEQ
+    [nil].reverse_each do # avoid exiting the JIT frame on the constant
+      GC.compact # move #entry ISEQ
+    end
+    ok # should be read off of escaped EP
+  end
+
+  entry.call
+}
+
 # invokesuper edge case
 assert_equal '[:A, [:A, :B]]', %q{
   class B
diff --git a/iseq.c b/iseq.c
index 38f756d9c8..b669c3612d 100644
--- a/iseq.c
+++ b/iseq.c
@@ -167,7 +167,7 @@ rb_iseq_free(const rb_iseq_t *iseq)
     struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
     rb_rjit_free_iseq(iseq); /* Notify RJIT */
 #if USE_YJIT
-    rb_yjit_iseq_free(body->yjit_payload);
+    rb_yjit_iseq_free(iseq);
     if (FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED)) {
         RUBY_ASSERT(rb_yjit_live_iseq_count > 0);
         rb_yjit_live_iseq_count--;
@@ -377,7 +377,7 @@ rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating)
             rb_rjit_iseq_update_references(body);
 #endif
 #if USE_YJIT
-            rb_yjit_iseq_update_references(body->yjit_payload);
+            rb_yjit_iseq_update_references(iseq);
 #endif
         }
         else {
diff --git a/vm.c b/vm.c
index 646b92accf..902ac48847 100644
--- a/vm.c
+++ b/vm.c
@@ -1007,6 +1007,11 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co
     }
 #endif

+    // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
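+    // vm_make_env_each() moves the frame's locals to a heap-allocated env and leaves
+    // EP pointing into the heap, so code compiled under that assumption is now stale.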
+    if (env->iseq) {
+        rb_yjit_invalidate_ep_is_bp(env->iseq);
+    }
+
     return (VALUE)env;
 }

diff --git a/yjit.c b/yjit.c
index ffd180429e..8ae010ac40 100644
--- a/yjit.c
+++ b/yjit.c
@@ -629,6 +629,12 @@ rb_get_iseq_body_stack_max(const rb_iseq_t *iseq)
     return iseq->body->stack_max;
 }

+enum rb_iseq_type
+rb_get_iseq_body_type(const rb_iseq_t *iseq)
+{
+    return iseq->body->type;
+}
+
 bool
 rb_get_iseq_flags_has_lead(const rb_iseq_t *iseq)
 {
diff --git a/yjit.h b/yjit.h
index dde9f750aa..5d1de2df90 100644
--- a/yjit.h
+++ b/yjit.h
@@ -40,14 +40,15 @@ void rb_yjit_init(bool yjit_enabled);
 void rb_yjit_bop_redefined(int redefined_flag, enum ruby_basic_operators bop);
 void rb_yjit_constant_state_changed(ID id);
 void rb_yjit_iseq_mark(void *payload);
-void rb_yjit_iseq_update_references(void *payload);
-void rb_yjit_iseq_free(void *payload);
+void rb_yjit_iseq_update_references(const rb_iseq_t *iseq);
+void rb_yjit_iseq_free(const rb_iseq_t *iseq);
 void rb_yjit_before_ractor_spawn(void);
 void rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic, unsigned insn_idx);
 void rb_yjit_tracing_invalidate_all(void);
 void rb_yjit_show_usage(int help, int highlight, unsigned int width, int columns);
 void rb_yjit_lazy_push_frame(const VALUE *pc);
 void rb_yjit_invalidate_no_singleton_class(VALUE klass);
+void rb_yjit_invalidate_ep_is_bp(const rb_iseq_t *iseq);

 #else
 // !USE_YJIT

 static inline void rb_yjit_init(bool yjit_enabled) {}
 static inline void rb_yjit_bop_redefined(int redefined_flag, enum ruby_basic_operators bop) {}
 static inline void rb_yjit_constant_state_changed(ID id) {}
 static inline void rb_yjit_iseq_mark(void *payload) {}
-static inline void rb_yjit_iseq_update_references(void *payload) {}
-static inline void rb_yjit_iseq_free(void *payload) {}
+static inline void rb_yjit_iseq_update_references(const rb_iseq_t *iseq) {}
+static inline void rb_yjit_iseq_free(const rb_iseq_t *iseq) {}
 static inline void rb_yjit_before_ractor_spawn(void) {}
 static inline void rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic, unsigned insn_idx) {}
 static inline void rb_yjit_tracing_invalidate_all(void) {}
 static inline void rb_yjit_lazy_push_frame(const VALUE *pc) {}
 static inline void rb_yjit_invalidate_no_singleton_class(VALUE klass) {}
+static inline void rb_yjit_invalidate_ep_is_bp(const rb_iseq_t *iseq) {}

 #endif // #if USE_YJIT
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index c16617b3f0..953ab0ac42 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -299,6 +299,7 @@ fn main() {
         .allowlist_type("ruby_tag_type")
         .allowlist_type("ruby_vm_throw_flags")
         .allowlist_type("vm_check_match_type")
+        .allowlist_type("rb_iseq_type")

         // From yjit.c
         .allowlist_function("rb_iseq_(get|set)_yjit_payload")
@@ -416,6 +417,7 @@ fn main() {
         .allowlist_function("rb_get_iseq_body_parent_iseq")
         .allowlist_function("rb_get_iseq_body_iseq_encoded")
         .allowlist_function("rb_get_iseq_body_stack_max")
+        .allowlist_function("rb_get_iseq_body_type")
         .allowlist_function("rb_get_iseq_flags_has_lead")
         .allowlist_function("rb_get_iseq_flags_has_opt")
         .allowlist_function("rb_get_iseq_flags_has_kw")
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index a927aa9684..a5fc3a10a1 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -46,7 +46,7 @@ type InsnGenFn = fn(

 /// Represents a [core::Block] while we build it.
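+/// It also collects the assumptions the generated code relies on (e.g. `no_ep_escape`
+/// below), which are registered with the invariants tables when the block is finalized.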
 pub struct JITState {
     /// Instruction sequence for the compiling block
-    iseq: IseqPtr,
+    pub iseq: IseqPtr,

     /// The iseq index of the first instruction in the block
     starting_insn_idx: IseqIdx,
@@ -101,6 +101,9 @@ pub struct JITState {
     /// A list of classes that are not supposed to have a singleton class.
     pub no_singleton_class_assumptions: Vec<VALUE>,

+    /// When true, the block is valid only when base pointer is equal to environment pointer.
+    pub no_ep_escape: bool,
+
     /// When true, the block is valid only when there is a total of one ractor running
     pub block_assumes_single_ractor: bool,

@@ -130,6 +133,7 @@ impl JITState {
             bop_assumptions: vec![],
             stable_constant_names_assumption: None,
             no_singleton_class_assumptions: vec![],
+            no_ep_escape: false,
             block_assumes_single_ractor: false,
             perf_map: Rc::default(),
             perf_stack: vec![],
@@ -171,6 +175,23 @@ impl JITState {
         unsafe { *(self.pc.offset(arg_idx + 1)) }
     }

+    /// Return true if the current ISEQ could escape an environment.
+    ///
+    /// As of vm_push_frame(), EP is always equal to BP. However, after pushing
+    /// a frame, some ISEQ setups call vm_bind_update_env(), which redirects EP.
+    /// Also, some method calls escape the environment to the heap.
+    fn escapes_ep(&self) -> bool {
+        match unsafe { get_iseq_body_type(self.iseq) } {
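+            // ISEQ types that always use a heap EP are rejected up front; any other
+            // ISEQ is checked against the EP escapes recorded in invariants.rs.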
+            // <main> frame is always associated to TOPLEVEL_BINDING.
+            ISEQ_TYPE_MAIN |
+            // Kernel#eval uses a heap EP when a Binding argument is not nil.
+            ISEQ_TYPE_EVAL => true,
+            // If this ISEQ has previously escaped EP, give up the optimization.
+            _ if iseq_escapes_ep(self.iseq) => true,
+            _ => false,
+        }
+    }
+
     // Get the index of the next instruction
     fn next_insn_idx(&self) -> u16 {
         self.insn_idx + insn_len(self.get_opcode()) as u16
@@ -250,6 +271,19 @@ impl JITState {
         true
     }

+    /// Assume that base pointer is equal to environment pointer in the current ISEQ.
+    /// Return true if it's safe to assume so.
+    fn assume_no_ep_escape(&mut self, asm: &mut Assembler, ocb: &mut OutlinedCb) -> bool {
+        if jit_ensure_block_entry_exit(self, asm, ocb).is_none() {
+            return false; // out of space, give up
+        }
+        if self.escapes_ep() {
+            return false; // EP has been escaped in this ISEQ. Disable the optimization to avoid an invalidation loop.
+        }
+        self.no_ep_escape = true;
+        true
+    }
+
     fn get_cfp(&self) -> *mut rb_control_frame_struct {
         unsafe { get_ec_cfp(self.ec) }
     }
@@ -2203,16 +2237,22 @@ fn gen_get_lep(jit: &JITState, asm: &mut Assembler) -> Opnd {
 fn gen_getlocal_generic(
     jit: &mut JITState,
     asm: &mut Assembler,
+    ocb: &mut OutlinedCb,
     ep_offset: u32,
     level: u32,
 ) -> Option<CodegenStatus> {
-    // Load environment pointer EP (level 0) from CFP
-    let ep_opnd = gen_get_ep(asm, level);
+    let local_opnd = if level == 0 && jit.assume_no_ep_escape(asm, ocb) {
+        // Load the local using SP register
+        asm.ctx.ep_opnd(-(ep_offset as i32))
+    } else {
+        // Load environment pointer EP (level 0) from CFP
+        let ep_opnd = gen_get_ep(asm, level);

-    // Load the local from the block
-    // val = *(vm_get_ep(GET_EP(), level) - idx);
-    let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
-    let local_opnd = Opnd::mem(64, ep_opnd, offs);
+        // Load the local from the block
+        // val = *(vm_get_ep(GET_EP(), level) - idx);
+        let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
+        Opnd::mem(64, ep_opnd, offs)
+    };

     // Write the local at SP
     let stack_top = if level == 0 {
@@ -2230,29 +2270,29 @@ fn gen_getlocal_generic(
 fn gen_getlocal(
     jit: &mut JITState,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
     let idx = jit.get_arg(0).as_u32();
     let level = jit.get_arg(1).as_u32();
-    gen_getlocal_generic(jit, asm, idx, level)
+    gen_getlocal_generic(jit, asm, ocb, idx, level)
 }

 fn gen_getlocal_wc0(
     jit: &mut JITState,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
     let idx = jit.get_arg(0).as_u32();
-    gen_getlocal_generic(jit, asm, idx, 0)
+    gen_getlocal_generic(jit, asm, ocb, idx, 0)
 }

 fn gen_getlocal_wc1(
     jit: &mut JITState,
     asm: &mut Assembler,
-    _ocb: &mut OutlinedCb,
+    ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
     let idx = jit.get_arg(0).as_u32();
-    gen_getlocal_generic(jit, asm, idx, 1)
+    gen_getlocal_generic(jit, asm, ocb, idx, 1)
 }

 fn gen_setlocal_generic(
@@ -2264,11 +2304,11 @@
 ) -> Option<CodegenStatus> {
     let value_type = asm.ctx.get_opnd_type(StackOpnd(0));

-    // Load environment pointer EP at level
-    let ep_opnd = gen_get_ep(asm, level);
-
     // Fallback because of write barrier
     if asm.ctx.get_chain_depth() > 0 {
+        // Load environment pointer EP at level
+        let ep_opnd = gen_get_ep(asm, level);
+
         // This function should not yield to the GC.
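+        // (Note: rb_vm_env_write() checks VM_ENV_FLAG_WB_REQUIRED itself and runs the
+        // write barrier for a heap env when needed.)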
         // void rb_vm_env_write(const VALUE *ep, int index, VALUE v)
         let index = -(ep_offset as i64);
@@ -2286,16 +2326,27 @@
         return Some(KeepCompiling);
     }

-    // Write barriers may be required when VM_ENV_FLAG_WB_REQUIRED is set, however write barriers
-    // only affect heap objects being written. If we know an immediate value is being written we
-    // can skip this check.
-    if !value_type.is_imm() {
-        // flags & VM_ENV_FLAG_WB_REQUIRED
+    let (flags_opnd, local_opnd) = if level == 0 && jit.assume_no_ep_escape(asm, ocb) {
+        // Load flags and the local using SP register
+        let local_opnd = asm.ctx.ep_opnd(-(ep_offset as i32));
+        let flags_opnd = asm.ctx.ep_opnd(VM_ENV_DATA_INDEX_FLAGS as i32);
+        (flags_opnd, local_opnd)
+    } else {
+        // Load flags and the local for the level
+        let ep_opnd = gen_get_ep(asm, level);
         let flags_opnd = Opnd::mem(
             64,
             ep_opnd,
             SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_FLAGS as i32,
         );
+        (flags_opnd, Opnd::mem(64, ep_opnd, -SIZEOF_VALUE_I32 * ep_offset as i32))
+    };
+
+    // Write barriers may be required when VM_ENV_FLAG_WB_REQUIRED is set, however write barriers
+    // only affect heap objects being written. If we know an immediate value is being written we
+    // can skip this check.
+    if !value_type.is_imm() {
+        // flags & VM_ENV_FLAG_WB_REQUIRED
         asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());

         // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
@@ -2319,8 +2370,7 @@
     let stack_top = asm.stack_pop(1);

     // Write the value at the environment pointer
-    let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
-    asm.mov(Opnd::mem(64, ep_opnd, offs), stack_top);
+    asm.mov(local_opnd, stack_top);

     Some(KeepCompiling)
 }
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 4152eab02c..ceba1069e6 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1138,8 +1138,12 @@ pub fn for_each_off_stack_iseq_payload<F: FnMut(&IseqPayload)>(mut callback:
 /// Free the per-iseq payload
 #[no_mangle]
-pub extern "C" fn rb_yjit_iseq_free(payload: *mut c_void) {
+pub extern "C" fn rb_yjit_iseq_free(iseq: IseqPtr) {
+    // Free invariants for the ISEQ
+    iseq_free_invariants(iseq);
+
     let payload = {
+        let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
         if payload.is_null() {
             // Nothing to free.
             return;
@@ -1266,7 +1270,11 @@ pub extern "C" fn rb_yjit_iseq_mark(payload: *mut c_void) {
 /// GC callback for updating GC objects in the per-iseq payload.
 /// This is a mirror of [rb_yjit_iseq_mark].
 #[no_mangle]
-pub extern "C" fn rb_yjit_iseq_update_references(payload: *mut c_void) {
+pub extern "C" fn rb_yjit_iseq_update_references(iseq: IseqPtr) {
+    // Update ISEQ references in invariants
+    iseq_update_references_in_invariants(iseq);
+
+    let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
     let payload = if payload.is_null() {
         // Nothing to update.
         return;
@@ -1657,6 +1665,9 @@ impl JITState {
         for klass in self.no_singleton_class_assumptions {
             track_no_singleton_class_assumption(blockref, klass);
         }
+        if self.no_ep_escape {
+            track_no_ep_escape_assumption(blockref, self.iseq);
+        }

         blockref
     }
@@ -1798,6 +1809,13 @@ impl Context {
         return Opnd::mem(64, SP, offset);
     }

+    /// Get an operand for the adjusted environment pointer address using SP register.
+    /// This is valid only when a Binding object hasn't been created for the frame.
+    pub fn ep_opnd(&self, offset: i32) -> Opnd {
+        let ep_offset = self.get_stack_size() as i32 + 1;
+        self.sp_opnd(-ep_offset + offset)
+    }
+
     /// Stop using a register for a given stack temp.
     /// This allows us to reuse the register for a value that we know is dead
     /// and will no longer be used (e.g. popped stack temp).
@@ -3130,6 +3148,12 @@ pub fn defer_compilation(
     // Likely a stub due to the increased chain depth
     let target0_address = branch.set_target(0, blockid, &next_ctx, ocb);

+    // Pad the block if it has the potential to be invalidated. This must be
+    // done before gen_fn() in case the jump is overwritten by a fallthrough.
+    if jit.block_entry_exit.is_some() {
+        asm.pad_inval_patch();
+    }
+
     // Call the branch generation function
     asm_comment!(asm, "defer_compilation");
     asm.mark_branch_start(&branch);
@@ -3313,9 +3337,10 @@ pub fn invalidate_block_version(blockref: &BlockRef) {

         assert!(
             cb.get_write_ptr() <= block_end,
-            "invalidation wrote past end of block (code_size: {:?}, new_size: {})",
+            "invalidation wrote past end of block (code_size: {:?}, new_size: {}, start_addr: {:?})",
             block.code_size(),
             cb.get_write_ptr().as_offset() - block_start.as_offset(),
+            block.start_addr.raw_ptr(cb),
         );
         cb.set_write_ptr(cur_pos);
         cb.set_dropped_bytes(cur_dropped_bytes);
diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs
index 9547e3fa2c..d07262ad4f 100644
--- a/yjit/src/cruby.rs
+++ b/yjit/src/cruby.rs
@@ -170,6 +170,7 @@ pub use rb_iseq_encoded_size as get_iseq_encoded_size;
 pub use rb_get_iseq_body_local_iseq as get_iseq_body_local_iseq;
 pub use rb_get_iseq_body_iseq_encoded as get_iseq_body_iseq_encoded;
 pub use rb_get_iseq_body_stack_max as get_iseq_body_stack_max;
+pub use rb_get_iseq_body_type as get_iseq_body_type;
 pub use rb_get_iseq_flags_has_lead as get_iseq_flags_has_lead;
 pub use rb_get_iseq_flags_has_opt as get_iseq_flags_has_opt;
 pub use rb_get_iseq_flags_has_kw as get_iseq_flags_has_kw;
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index bdaee0534c..70578ec7e9 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -478,6 +478,16 @@ pub struct iseq_inline_iv_cache_entry {
 pub struct iseq_inline_cvar_cache_entry {
     pub entry: *mut rb_cvar_class_tbl_entry,
 }
+pub const ISEQ_TYPE_TOP: rb_iseq_type = 0;
+pub const ISEQ_TYPE_METHOD: rb_iseq_type = 1;
+pub const ISEQ_TYPE_BLOCK: rb_iseq_type = 2;
+pub const ISEQ_TYPE_CLASS: rb_iseq_type = 3;
+pub const ISEQ_TYPE_RESCUE: rb_iseq_type = 4;
+pub const ISEQ_TYPE_ENSURE: rb_iseq_type = 5;
+pub const ISEQ_TYPE_EVAL: rb_iseq_type = 6;
+pub const ISEQ_TYPE_MAIN: rb_iseq_type = 7;
+pub const ISEQ_TYPE_PLAIN: rb_iseq_type = 8;
+pub type rb_iseq_type = u32;
 pub const BUILTIN_ATTR_LEAF: rb_builtin_attr = 1;
 pub const BUILTIN_ATTR_SINGLE_NOARG_LEAF: rb_builtin_attr = 2;
 pub const BUILTIN_ATTR_INLINE_BLOCK: rb_builtin_attr = 4;
@@ -1154,6 +1164,7 @@ extern "C" {
     pub fn rb_get_iseq_body_local_table_size(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
     pub fn rb_get_iseq_body_iseq_encoded(iseq: *const rb_iseq_t) -> *mut VALUE;
     pub fn rb_get_iseq_body_stack_max(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+    pub fn rb_get_iseq_body_type(iseq: *const rb_iseq_t) -> rb_iseq_type;
     pub fn rb_get_iseq_flags_has_lead(iseq: *const rb_iseq_t) -> bool;
     pub fn rb_get_iseq_flags_has_opt(iseq: *const rb_iseq_t) -> bool;
     pub fn rb_get_iseq_flags_has_kw(iseq: *const rb_iseq_t) -> bool;
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
index e460293440..a9432f8745 100644
--- a/yjit/src/invariants.rs
+++ b/yjit/src/invariants.rs
@@ -59,6 +59,11 @@ pub struct Invariants {
     /// there has been a singleton class for the class after boot, so you cannot
     /// assume no singleton class going forward.
     no_singleton_classes: HashMap<VALUE, HashSet<BlockRef>>,
+
+    /// A map from an ISEQ to a set of blocks that assume base pointer is equal
+    /// to environment pointer. When the set is empty, it means that EP has been
+    /// escaped in the ISEQ.
+    no_ep_escape_iseqs: HashMap<IseqPtr, HashSet<BlockRef>>,
 }

 /// Private singleton instance of the invariants global struct.
@@ -76,6 +81,7 @@ impl Invariants {
             constant_state_blocks: HashMap::new(),
             block_constant_states: HashMap::new(),
             no_singleton_classes: HashMap::new(),
+            no_ep_escape_iseqs: HashMap::new(),
         });
     }
 }
@@ -154,6 +160,43 @@ pub fn has_singleton_class_of(klass: VALUE) -> bool {
         .map_or(false, |blocks| blocks.is_empty())
 }

+/// Track that a block will assume that base pointer is equal to environment pointer.
+pub fn track_no_ep_escape_assumption(uninit_block: BlockRef, iseq: IseqPtr) {
+    Invariants::get_instance()
+        .no_ep_escape_iseqs
+        .entry(iseq)
+        .or_default()
+        .insert(uninit_block);
+}
+
+/// Returns true if a given ISEQ has previously escaped an environment.
+pub fn iseq_escapes_ep(iseq: IseqPtr) -> bool {
+    Invariants::get_instance()
+        .no_ep_escape_iseqs
+        .get(&iseq)
+        .map_or(false, |blocks| blocks.is_empty())
+}
+
+/// Update ISEQ references in invariants on GC compaction
+pub fn iseq_update_references_in_invariants(iseq: IseqPtr) {
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+    let no_ep_escape_iseqs = &mut Invariants::get_instance().no_ep_escape_iseqs;
+    if let Some(blocks) = no_ep_escape_iseqs.remove(&iseq) {
+        let new_iseq = unsafe { rb_gc_location(iseq.into()) }.as_iseq();
+        no_ep_escape_iseqs.insert(new_iseq, blocks);
+    }
+}
+
+/// Forget an ISEQ remembered in invariants
+pub fn iseq_free_invariants(iseq: IseqPtr) {
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+    Invariants::get_instance().no_ep_escape_iseqs.remove(&iseq);
+}
+
 // Checks rb_method_basic_definition_p and registers the current block for invalidation if method
 // lookup changes.
 // A "basic method" is one defined during VM boot, so we can use this to check assumptions based on
@@ -420,6 +463,10 @@ pub fn block_assumptions_free(blockref: BlockRef) {
         for (_, blocks) in invariants.no_singleton_classes.iter_mut() {
             blocks.remove(&blockref);
         }
+        // Remove tracking for blocks assuming EP doesn't escape
+        for (_, blocks) in invariants.no_ep_escape_iseqs.iter_mut() {
+            blocks.remove(&blockref);
+        }
     }

 /// Callback from the opt_setinlinecache instruction in the interpreter.
@@ -515,6 +562,34 @@ pub extern "C" fn rb_yjit_invalidate_no_singleton_class(klass: VALUE) {
     }
 }

+/// Invalidate blocks for a given ISEQ that assume environment pointer is
+/// equal to base pointer.
+#[no_mangle]
+pub extern "C" fn rb_yjit_invalidate_ep_is_bp(iseq: IseqPtr) {
+    // Skip tracking EP escapes on boot. We don't need to invalidate anything during boot.
+    if unsafe { INVARIANTS.is_none() } {
+        return;
+    }
+
+    // If an EP escape for this ISEQ is detected for the first time, invalidate all blocks
+    // associated with the ISEQ.
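+    // (Both arms below leave an empty set in the map; iseq_escapes_ep() treats an empty
+    // set as "EP already escaped", so newly compiled blocks skip the optimization.)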
+    let no_ep_escape_iseqs = &mut Invariants::get_instance().no_ep_escape_iseqs;
+    match no_ep_escape_iseqs.get_mut(&iseq) {
+        Some(blocks) => {
+            // Invalidate existing blocks, and let iseq_escapes_ep()
+            // return true when this ISEQ is compiled again
+            for block in mem::take(blocks) {
+                invalidate_block_version(&block);
+                incr_counter!(invalidate_no_singleton_class);
+            }
+        }
+        None => {
+            // Let iseq_escapes_ep() return true for this ISEQ from now on
+            no_ep_escape_iseqs.insert(iseq, HashSet::new());
+        }
+    }
+}
+
 // Invalidate all generated code and patch C method return code to contain
 // logic for firing the c_return TracePoint event. Once rb_vm_barrier()
 // returns, all other ractors are pausing inside RB_VM_LOCK_ENTER(), which