YJIT: Reset dropped_bytes when patching code

We switch to a new page when we detect dropped_bytes flipping from false
to true. Previously, when patching code for invalidation during code GC,
we started with the flag already set to true, so we failed to apply
patches that straddle pages. We would write out jumps halfway and then
stop, which left the code corrupted.

Reset the flag before patching so we patch across pages properly.
Alan Wu 2022-11-07 16:49:48 -05:00
Parent 1466682a23
Commit 5d95cd99f4
3 changed files: 19 additions and 6 deletions
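The diffs below repeat one pattern at every patch site: save the write position and the dropped_bytes flag, reset the flag, apply the patch, then restore both. Here is a minimal sketch of that pattern under simplified assumptions; PatchBuffer, write_byte, and the byte values are made-up stand-ins for illustration, not YJIT's actual CodeBlock or Assembler API.

// Simplified stand-in for a code buffer that drops writes past its capacity.
struct PatchBuffer {
    bytes: Vec<u8>,
    write_pos: usize,
    dropped_bytes: bool,
}

impl PatchBuffer {
    fn get_write_pos(&self) -> usize { self.write_pos }
    fn set_pos(&mut self, pos: usize) { self.write_pos = pos; }
    fn has_dropped_bytes(&self) -> bool { self.dropped_bytes }
    fn set_dropped_bytes(&mut self, dropped_bytes: bool) { self.dropped_bytes = dropped_bytes; }

    // Write one byte, marking dropped_bytes instead of growing past capacity
    // (a crude analogue of running off the end of a code page).
    fn write_byte(&mut self, byte: u8) {
        if self.write_pos < self.bytes.len() {
            self.bytes[self.write_pos] = byte;
            self.write_pos += 1;
        } else {
            self.dropped_bytes = true;
        }
    }
}

// Patch `jump` over existing code at `target`, then restore the writer state.
fn patch_at(cb: &mut PatchBuffer, target: usize, jump: &[u8]) {
    let old_pos = cb.get_write_pos();
    let old_dropped_bytes = cb.has_dropped_bytes();

    cb.set_pos(target);
    cb.set_dropped_bytes(false); // reset so the "out of space" state can't carry over
    for &byte in jump {
        cb.write_byte(byte);
    }

    // Restore the original write position and flag once the patch is in place.
    cb.set_pos(old_pos);
    cb.set_dropped_bytes(old_dropped_bytes);
}

fn main() {
    // The buffer starts "full", with dropped_bytes already true.
    let mut cb = PatchBuffer { bytes: vec![0x90; 32], write_pos: 32, dropped_bytes: true };
    patch_at(&mut cb, 8, &[0xe9, 0x00, 0x00, 0x00, 0x10]); // patch a jump at offset 8
    assert_eq!(&cb.bytes[8..13], &[0xe9, 0x00, 0x00, 0x00, 0x10]); // patch was fully written
    assert!(cb.has_dropped_bytes()); // original flag restored afterwards
}

Starting the patch with dropped_bytes still true is exactly the failure the commit message describes: the writer believes it has already run out of room and silently drops the rest of the jump.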


@@ -463,6 +463,12 @@ impl CodeBlock {
self.dropped_bytes
}
/// To patch code that straddle pages correctly, we need to start with
/// the dropped bytes flag unset so we can detect when to switch to a new page.
pub fn set_dropped_bytes(&mut self, dropped_bytes: bool) {
self.dropped_bytes = dropped_bytes;
}
/// Allocate a new label with a given name
pub fn new_label(&mut self, name: String) -> usize {
assert!(!name.contains(' '), "use underscores in label names, not spaces");


@@ -1599,18 +1599,13 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &mut Branch) {
}
*/
let old_write_pos = cb.get_write_pos();
let mut block = branch.block.borrow_mut();
let branch_terminates_block = branch.end_addr == block.end_addr;
// Rewrite the branch
assert!(branch.dst_addrs[0].is_some());
cb.set_write_ptr(branch.start_addr.unwrap());
// Generate the branch
let mut asm = Assembler::new();
asm.comment("regenerate_branch");
(branch.gen_fn)(
&mut asm,
branch.dst_addrs[0].unwrap(),
@@ -1618,6 +1613,11 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &mut Branch) {
branch.shape,
);
// Rewrite the branch
let old_write_pos = cb.get_write_pos();
let old_dropped_bytes = cb.has_dropped_bytes();
cb.set_write_ptr(branch.start_addr.unwrap());
cb.set_dropped_bytes(false);
asm.compile(cb);
branch.end_addr = Some(cb.get_write_ptr());
@@ -1638,6 +1638,7 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &mut Branch) {
if old_write_pos > cb.get_write_pos() {
// We rewound cb->write_pos to generate the branch, now restore it.
cb.set_pos(old_write_pos);
cb.set_dropped_bytes(old_dropped_bytes);
} else {
// The branch sits at the end of cb and consumed some memory.
// Keep cb.write_pos.
@@ -2193,10 +2194,12 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
// Patch in a jump to block.entry_exit.
let cur_pos = cb.get_write_ptr();
let cur_dropped_bytes = cb.has_dropped_bytes();
cb.set_write_ptr(block_start);
let mut asm = Assembler::new();
asm.jmp(block_entry_exit.into());
cb.set_dropped_bytes(false);
asm.compile(&mut cb);
assert!(
@@ -2206,6 +2209,7 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
cb.get_write_ptr().into_i64() - block_start.into_i64(),
);
cb.set_write_ptr(cur_pos);
cb.set_dropped_bytes(cur_dropped_bytes);
}
}


@@ -558,6 +558,7 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
// Apply patches
let old_pos = cb.get_write_pos();
let old_dropped_bytes = cb.has_dropped_bytes();
let mut patches = CodegenGlobals::take_global_inval_patches();
patches.sort_by_cached_key(|patch| patch.inline_patch_pos.raw_ptr());
let mut last_patch_end = std::ptr::null();
@@ -568,10 +569,12 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
asm.jmp(patch.outlined_target_pos.into());
cb.set_write_ptr(patch.inline_patch_pos);
cb.set_dropped_bytes(false);
asm.compile(cb);
last_patch_end = cb.get_write_ptr().raw_ptr();
}
cb.set_pos(old_pos);
cb.set_dropped_bytes(old_dropped_bytes);
// Freeze invalidated part of the codepage. We only want to wait for
// running instances of the code to exit from now on, so we shouldn't