YJIT: Bounds check every byte in the assembler

Previously, YJIT assumed that basic blocks never consume more than
1 KiB of memory. This assumption does not hold for long Ruby methods
such as the one in the following:

```ruby
eval(<<RUBY)
def set_local_a_lot
  #{'_=0;'*0x40000}
end
RUBY

set_local_a_lot
```

For low `--yjit-exec-mem-size` values, one basic block could exhaust the
entire buffer.

Introduce a new field `codeblock_t::dropped_bytes` that the assembler
sets whenever it runs out of space. Check this field in
gen_single_block() to respond to out of memory situations and other
error conditions. This design avoids making the control flow graph of
existing code generation functions more complex.

Use POSIX shell in misc/test_yjit_asm.sh since bash expands
`${0%/*/*}` differently.

Co-authored-by: Aaron Patterson <tenderlove@ruby-lang.org>
This commit is contained in:
Alan Wu 2021-12-01 14:15:23 -05:00
Родитель 3be067234f
Коммит f41b4d44f9
6 изменённых файлов: 81 добавлений и 62 удалений

Просмотреть файл

@ -1,9 +1,9 @@
#!/bin/bash
#!/bin/sh
set -e
set -x
clang -std=gnu99 -Wall -Werror -Wno-error=unused-function -Wshorten-64-to-32 -I ${0%/*/*} ${0%/*}/yjit_asm_tests.c -o asm_test
clang -std=gnu99 -Wall -Werror -Wno-error=unused-function -Wshorten-64-to-32 -I "${0%/*/*}" "${0%/*}/yjit_asm_tests.c" -o asm_test
./asm_test

Просмотреть файл

@ -26,7 +26,7 @@ void print_bytes(codeblock_t* cb)
{
for (uint32_t i = 0; i < cb->write_pos; ++i)
{
printf("%02X", (int)cb->mem_block[i]);
printf("%02X", (int)*cb_get_ptr(cb, i));
}
printf("\n");
@ -59,7 +59,7 @@ void check_bytes(codeblock_t* cb, const char* bytes)
char* endptr;
long int byte = strtol(byte_str, &endptr, 16);
uint8_t cb_byte = cb->mem_block[i];
uint8_t cb_byte = *cb_get_ptr(cb, i);
if (cb_byte != byte)
{

Просмотреть файл

@ -211,11 +211,10 @@ static uint8_t *alloc_exec_mem(uint32_t mem_size)
}
codeblock_t block;
block.current_aligned_write_pos = ALIGNED_WRITE_POSITION_NONE;
block.mem_block = mem_block;
block.mem_size = mem_size;
codeblock_t *cb = &block;
cb_init(cb, mem_block, mem_size);
codeblock_t * cb = &block;
// Fill the executable memory with INT3 (0xCC) so that
// executing uninitialized memory will fault
cb_mark_all_writeable(cb);
@ -233,7 +232,7 @@ static uint8_t *alloc_exec_mem(uint32_t mem_size)
void cb_init(codeblock_t *cb, uint8_t *mem_block, uint32_t mem_size)
{
assert (mem_block);
cb->mem_block = mem_block;
cb->mem_block_ = mem_block;
cb->mem_size = mem_size;
cb->write_pos = 0;
cb->num_labels = 0;
@ -241,42 +240,50 @@ void cb_init(codeblock_t *cb, uint8_t *mem_block, uint32_t mem_size)
cb->current_aligned_write_pos = ALIGNED_WRITE_POSITION_NONE;
}
// Set the current write position
//
// NOTE(review): unlike the byte writers (which set dropped_bytes on
// overflow), this asserts that `pos` is in bounds; callers are expected
// to pass a position previously obtained from this code block.
void cb_set_pos(codeblock_t *cb, uint32_t pos)
{
// Assert here since while assembler functions do bounds checking, there is
// nothing stopping users from taking out an out-of-bounds pointer and
// doing bad accesses with it.
assert (pos < cb->mem_size);
cb->write_pos = pos;
}
// Align the current write position to a multiple of bytes
void cb_align_pos(codeblock_t *cb, uint32_t multiple)
{
// Compute the pointer modulo the given alignment boundary
uint8_t *ptr = &cb->mem_block[cb->write_pos];
uint8_t *ptr = cb_get_write_ptr(cb);
uint8_t *aligned_ptr = align_ptr(ptr, multiple);
const uint32_t write_pos = cb->write_pos;
// Pad the pointer by the necessary amount to align it
ptrdiff_t pad = aligned_ptr - ptr;
cb->write_pos += (int32_t)pad;
}
// Set the current write position
void cb_set_pos(codeblock_t *cb, uint32_t pos)
{
assert (pos < cb->mem_size);
cb->write_pos = pos;
cb_set_pos(cb, write_pos + (int32_t)pad);
}
// Set the current write position from a pointer
void cb_set_write_ptr(codeblock_t *cb, uint8_t *code_ptr)
{
intptr_t pos = code_ptr - cb->mem_block;
intptr_t pos = code_ptr - cb->mem_block_;
assert (pos < cb->mem_size);
cb->write_pos = (uint32_t)pos;
cb_set_pos(cb, (uint32_t)pos);
}
// Get a direct pointer into the executable memory block
uint8_t *cb_get_ptr(codeblock_t *cb, uint32_t index)
uint8_t *cb_get_ptr(const codeblock_t *cb, uint32_t index)
{
assert (index < cb->mem_size);
return &cb->mem_block[index];
if (index < cb->mem_size) {
return &cb->mem_block_[index];
}
else {
return NULL;
}
}
// Get a direct pointer to the current write position
uint8_t *cb_get_write_ptr(codeblock_t *cb)
uint8_t *cb_get_write_ptr(const codeblock_t *cb)
{
return cb_get_ptr(cb, cb->write_pos);
}
@ -284,10 +291,15 @@ uint8_t *cb_get_write_ptr(codeblock_t *cb)
// Write a byte at the current position
void cb_write_byte(codeblock_t *cb, uint8_t byte)
{
assert (cb->mem_block);
assert (cb->write_pos + 1 <= cb->mem_size);
cb_mark_position_writeable(cb, cb->write_pos);
cb->mem_block[cb->write_pos++] = byte;
assert (cb->mem_block_);
if (cb->write_pos < cb->mem_size) {
cb_mark_position_writeable(cb, cb->write_pos);
cb->mem_block_[cb->write_pos] = byte;
cb->write_pos++;
}
else {
cb->dropped_bytes = true;
}
}
// Write multiple bytes starting from the current position
@ -890,14 +902,18 @@ static void cb_write_jcc_ptr(codeblock_t *cb, const char *mnem, uint8_t op0, uin
cb_write_byte(cb, op1);
// Pointer to the end of this jump instruction
uint8_t *end_ptr = &cb->mem_block[cb->write_pos] + 4;
uint8_t *end_ptr = cb_get_ptr(cb, cb->write_pos + 4);
// Compute the jump offset
int64_t rel64 = (int64_t)(dst_ptr - end_ptr);
assert (rel64 >= INT32_MIN && rel64 <= INT32_MAX);
// Write the relative 32-bit jump offset
cb_write_int(cb, (int32_t)rel64, 32);
if (rel64 >= INT32_MIN && rel64 <= INT32_MAX) {
// Write the relative 32-bit jump offset
cb_write_int(cb, (int32_t)rel64, 32);
}
else {
// Offset doesn't fit in 4 bytes. Report error.
cb->dropped_bytes = true;
}
}
// Encode a conditional move instruction
@ -971,14 +987,13 @@ void call_ptr(codeblock_t *cb, x86opnd_t scratch_reg, uint8_t *dst_ptr)
assert (scratch_reg.type == OPND_REG);
// Pointer to the end of this call instruction
uint8_t *end_ptr = &cb->mem_block[cb->write_pos] + 5;
uint8_t *end_ptr = cb_get_ptr(cb, cb->write_pos + 5);
// Compute the jump offset
int64_t rel64 = (int64_t)(dst_ptr - end_ptr);
// If the offset fits in 32-bit
if (rel64 >= INT32_MIN && rel64 <= INT32_MAX)
{
if (rel64 >= INT32_MIN && rel64 <= INT32_MAX) {
call_rel32(cb, (int32_t)rel64);
return;
}
@ -1784,8 +1799,8 @@ void cb_write_lock_prefix(codeblock_t *cb)
void cb_mark_all_writeable(codeblock_t * cb)
{
if (mprotect(cb->mem_block, cb->mem_size, PROT_READ | PROT_WRITE)) {
fprintf(stderr, "Couldn't make JIT page (%p) writeable, errno: %s", (void *)cb->mem_block, strerror(errno));
if (mprotect(cb->mem_block_, cb->mem_size, PROT_READ | PROT_WRITE)) {
fprintf(stderr, "Couldn't make JIT page (%p) writeable, errno: %s", (void *)cb->mem_block_, strerror(errno));
abort();
}
}
@ -1797,8 +1812,9 @@ void cb_mark_position_writeable(codeblock_t * cb, uint32_t write_pos)
if (cb->current_aligned_write_pos != aligned_position) {
cb->current_aligned_write_pos = aligned_position;
if (mprotect(cb->mem_block + aligned_position, pagesize, PROT_READ | PROT_WRITE)) {
fprintf(stderr, "Couldn't make JIT page (%p) writeable, errno: %s", (void *)(cb->mem_block + aligned_position), strerror(errno));
void *const page_addr = cb_get_ptr(cb, aligned_position);
if (mprotect(page_addr, pagesize, PROT_READ | PROT_WRITE)) {
fprintf(stderr, "Couldn't make JIT page (%p) writeable, errno: %s", page_addr, strerror(errno));
abort();
}
}
@ -1807,8 +1823,8 @@ void cb_mark_position_writeable(codeblock_t * cb, uint32_t write_pos)
void cb_mark_all_executable(codeblock_t * cb)
{
cb->current_aligned_write_pos = ALIGNED_WRITE_POSITION_NONE;
if (mprotect(cb->mem_block, cb->mem_size, PROT_READ | PROT_EXEC)) {
fprintf(stderr, "Couldn't make JIT page (%p) executable, errno: %s", (void *)cb->mem_block, strerror(errno));
if (mprotect(cb->mem_block_, cb->mem_size, PROT_READ | PROT_EXEC)) {
fprintf(stderr, "Couldn't make JIT page (%p) executable, errno: %s", (void *)cb->mem_block_, strerror(errno));
abort();
}
}

Просмотреть файл

@ -26,12 +26,13 @@ typedef struct LabelRef
typedef struct CodeBlock
{
// Memory block
uint8_t *mem_block;
// Users are advised to not use this directly.
uint8_t *mem_block_;
// Memory block size
uint32_t mem_size;
/// Current writing position
// Current writing position
uint32_t write_pos;
// Table of registered label addresses
@ -50,14 +51,20 @@ typedef struct CodeBlock
// Number of references to labels
uint32_t num_refs;
// TODO: system for disassembly/comment strings, indexed by position
// Flag to enable or disable comments
bool has_asm;
// Keep track of the current aligned write position.
// Used for changing protection when writing to the JIT buffer
uint32_t current_aligned_write_pos;
// Set if the assembler is unable to output some instructions,
// for example, when there is not enough space or when a jump
// target is too far away.
bool dropped_bytes;
// Flag to enable or disable comments
bool has_asm;
} codeblock_t;
// 1 is not aligned so this won't match any pages
@ -258,8 +265,8 @@ static inline void cb_init(codeblock_t *cb, uint8_t *mem_block, uint32_t mem_siz
static inline void cb_align_pos(codeblock_t *cb, uint32_t multiple);
static inline void cb_set_pos(codeblock_t *cb, uint32_t pos);
static inline void cb_set_write_ptr(codeblock_t *cb, uint8_t *code_ptr);
static inline uint8_t *cb_get_ptr(codeblock_t *cb, uint32_t index);
static inline uint8_t *cb_get_write_ptr(codeblock_t *cb);
static inline uint8_t *cb_get_ptr(const codeblock_t *cb, uint32_t index);
static inline uint8_t *cb_get_write_ptr(const codeblock_t *cb);
static inline void cb_write_byte(codeblock_t *cb, uint8_t byte);
static inline void cb_write_bytes(codeblock_t *cb, uint32_t num_bytes, ...);
static inline void cb_write_int(codeblock_t *cb, uint64_t val, uint32_t num_bits);

Просмотреть файл

@ -554,7 +554,7 @@ yjit_entry_prologue(codeblock_t *cb, const rb_iseq_t *iseq)
const uint32_t old_write_pos = cb->write_pos;
// Align the current write positon to cache line boundaries
// Align the current write position to cache line boundaries
cb_align_pos(cb, 64);
uint8_t *code_ptr = cb_get_ptr(cb, cb->write_pos);
@ -640,16 +640,6 @@ gen_single_block(blockid_t blockid, const ctx_t *start_ctx, rb_execution_context
{
RUBY_ASSERT(cb != NULL);
// Check if there is enough executable memory.
// FIXME: This bound isn't enforced and long blocks can potentially use more.
enum { MAX_CODE_PER_BLOCK = 1024 };
if (cb->write_pos + MAX_CODE_PER_BLOCK >= cb->mem_size) {
return NULL;
}
if (ocb->write_pos + MAX_CODE_PER_BLOCK >= ocb->mem_size) {
return NULL;
}
// Allocate the new block
block_t *block = calloc(1, sizeof(block_t));
if (!block) {
@ -778,6 +768,12 @@ gen_single_block(blockid_t blockid, const ctx_t *start_ctx, rb_execution_context
// doesn't go to the next instruction.
RUBY_ASSERT(!jit.record_boundary_patch_point);
// If code for the block doesn't fit, free the block and fail.
if (cb->dropped_bytes || ocb->dropped_bytes) {
yjit_free_block(block);
return NULL;
}
if (YJIT_DUMP_MODE >= 2) {
// Dump list of compiled instrutions
fprintf(stderr, "Compiled the following for iseq=%p:\n", (void *)iseq);

Просмотреть файл

@ -1150,7 +1150,7 @@ yjit_get_code_page(uint32_t cb_bytes_needed, uint32_t ocb_bytes_needed)
code_page_t *new_code_page = rb_yjit_code_page_unwrap(yjit_cur_code_page);
// Jump to the new code page
jmp_ptr(&code_page->cb, new_code_page->cb.mem_block);
jmp_ptr(&code_page->cb, cb_get_ptr(&new_code_page->cb, 0));
return yjit_cur_code_page;
}