// This file is a fragment of the yjit.o compilation unit. See yjit.c.
#include "internal.h"
#include "vm_sync.h"
#include "builtin.h"

#include "yjit.h"
#include "yjit_asm.h"
#include "yjit_iface.h"
#include "yjit_core.h"
#include "yjit_codegen.h"

// For exiting from YJIT frame from branch_stub_hit().
// Filled by gen_code_for_exit_from_stub().
static uint8_t *code_for_exit_from_stub = NULL;
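
// Note (from the change that introduced this snippet): when branch_stub_hit()
// cannot generate a new block, e.g. because YJIT is out of executable memory,
// it reconstructs interpreter state and exits through this code, which returns
// Qundef from the YJIT native frame so the interpreter resumes at the stub's
// location.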

/*
Get an operand for the adjusted stack pointer address
*/
static x86opnd_t
ctx_sp_opnd(ctx_t *ctx, int32_t offset_bytes)
{
    int32_t offset = (ctx->sp_offset * sizeof(VALUE)) + offset_bytes;
    return mem_opnd(64, REG_SP, offset);
}
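
// Worked example (illustrative): with ctx->sp_offset == 2 and offset_bytes == 0,
// ctx_sp_opnd() yields the memory operand [REG_SP + 16], assuming 8-byte VALUE
// slots on a 64-bit build.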

/*
Push one new value on the temp stack with an explicit mapping
Return a pointer to the new stack top
*/
static x86opnd_t
ctx_stack_push_mapping(ctx_t *ctx, temp_type_mapping_t mapping)
{
    // If type propagation is disabled, store no types
    if (rb_yjit_opts.no_type_prop) {
        mapping.type = TYPE_UNKNOWN;
    }

    // Keep track of the type and mapping of the value
    if (ctx->stack_size < MAX_TEMP_TYPES) {
        ctx->temp_mapping[ctx->stack_size] = mapping.mapping;
        ctx->temp_types[ctx->stack_size] = mapping.type;

        RUBY_ASSERT(mapping.mapping.kind != TEMP_LOCAL || mapping.mapping.idx < MAX_LOCAL_TYPES);
        RUBY_ASSERT(mapping.mapping.kind != TEMP_STACK || mapping.mapping.idx == 0);
        RUBY_ASSERT(mapping.mapping.kind != TEMP_SELF || mapping.mapping.idx == 0);
    }

    ctx->stack_size += 1;
    ctx->sp_offset += 1;

    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
    return mem_opnd(64, REG_SP, offset);
}
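
// Note (derived from the code above): pushes beyond MAX_TEMP_TYPES still grow
// stack_size and sp_offset, but their type/mapping is not recorded, so later
// reads of those slots fall back to TYPE_UNKNOWN / MAP_STACK.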

/*
Push one new value on the temp stack
Return a pointer to the new stack top
*/
static x86opnd_t
ctx_stack_push(ctx_t *ctx, val_type_t type)
{
    temp_type_mapping_t mapping = { MAP_STACK, type };
    return ctx_stack_push_mapping(ctx, mapping);
}

/*
Push the self value on the stack
*/
static x86opnd_t
ctx_stack_push_self(ctx_t *ctx)
{
    temp_type_mapping_t mapping = { MAP_SELF, TYPE_UNKNOWN };
    return ctx_stack_push_mapping(ctx, mapping);
}

/*
Push a local variable on the stack
*/
static x86opnd_t
ctx_stack_push_local(ctx_t *ctx, size_t local_idx)
{
    if (local_idx >= MAX_LOCAL_TYPES) {
        return ctx_stack_push(ctx, TYPE_UNKNOWN);
    }

    temp_type_mapping_t mapping = {
        (temp_mapping_t){ .kind = TEMP_LOCAL, .idx = local_idx },
        TYPE_UNKNOWN
    };

    return ctx_stack_push_mapping(ctx, mapping);
}

/*
Pop N values off the stack
Return a pointer to the stack top before the pop operation
*/
static x86opnd_t
ctx_stack_pop(ctx_t *ctx, size_t n)
{
    RUBY_ASSERT(n <= ctx->stack_size);

    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
    x86opnd_t top = mem_opnd(64, REG_SP, offset);

    // Clear the types of the popped values
    for (size_t i = 0; i < n; ++i)
    {
        size_t idx = ctx->stack_size - i - 1;
        if (idx < MAX_TEMP_TYPES) {
            ctx->temp_types[idx] = TYPE_UNKNOWN;
            ctx->temp_mapping[idx] = MAP_STACK;
        }
    }

    ctx->stack_size -= n;
    ctx->sp_offset -= n;

    return top;
}
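
// Worked example (illustrative): with stack_size == 3 and sp_offset == 3,
// ctx_stack_pop(ctx, 2) returns the operand [REG_SP + 16] (the old stack top),
// leaves stack_size == 1 and sp_offset == 1, and resets the two popped slots
// to TYPE_UNKNOWN / MAP_STACK.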

/**
Get an operand pointing to a slot on the temp stack
*/
static x86opnd_t
ctx_stack_opnd(ctx_t *ctx, int32_t idx)
{
    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1 - idx) * sizeof(VALUE);
    x86opnd_t opnd = mem_opnd(64, REG_SP, offset);

    return opnd;
}
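
// Illustrative: ctx_stack_opnd(ctx, 0) addresses the current top of the temp
// stack, ctx_stack_opnd(ctx, 1) the value just below it, and so on.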

/**
Get the type of an instruction operand
*/
static val_type_t
ctx_get_opnd_type(const ctx_t *ctx, insn_opnd_t opnd)
{
    if (opnd.is_self)
        return ctx->self_type;

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    int stack_idx = ctx->stack_size - 1 - opnd.idx;

    // If outside of tracked range, do nothing
    if (stack_idx >= MAX_TEMP_TYPES)
        return TYPE_UNKNOWN;

    temp_mapping_t mapping = ctx->temp_mapping[stack_idx];

    switch (mapping.kind) {
      case TEMP_SELF:
        return ctx->self_type;

      case TEMP_STACK:
        return ctx->temp_types[ctx->stack_size - 1 - opnd.idx];

      case TEMP_LOCAL:
        RUBY_ASSERT(mapping.idx < MAX_LOCAL_TYPES);
        return ctx->local_types[mapping.idx];
    }

    rb_bug("unreachable");
}

static int type_diff(val_type_t src, val_type_t dst);

#define UPGRADE_TYPE(dest, src) do { \
    RUBY_ASSERT(type_diff((src), (dest)) != INT_MAX); \
    (dest) = (src); \
} while (false)
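
// Usage sketch (illustrative): UPGRADE_TYPE(ctx->self_type, incoming_type)
// asserts via type_diff() that the incoming type is compatible with the
// currently recorded type, then overwrites the recorded type with it.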

/**
Upgrade (or "learn") the type of an instruction operand
This value must be compatible and at least as specific as the previously known type.
If this value originated from self, or an lvar, the learned type will be
propagated back to its source.
*/
static void
ctx_upgrade_opnd_type(ctx_t *ctx, insn_opnd_t opnd, val_type_t type)
{
    // If type propagation is disabled, store no types
    if (rb_yjit_opts.no_type_prop)
        return;

    if (opnd.is_self) {
        UPGRADE_TYPE(ctx->self_type, type);
        return;
    }

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    int stack_idx = ctx->stack_size - 1 - opnd.idx;

    // If outside of tracked range, do nothing
    if (stack_idx >= MAX_TEMP_TYPES)
        return;

    temp_mapping_t mapping = ctx->temp_mapping[stack_idx];

    switch (mapping.kind) {
      case TEMP_SELF:
        UPGRADE_TYPE(ctx->self_type, type);
        break;

      case TEMP_STACK:
        UPGRADE_TYPE(ctx->temp_types[stack_idx], type);
        break;

      case TEMP_LOCAL:
        RUBY_ASSERT(mapping.idx < MAX_LOCAL_TYPES);
        UPGRADE_TYPE(ctx->local_types[mapping.idx], type);
        break;
    }
}

/*
Get both the type and mapping (where the value originates) of an operand.
This can be used with ctx_stack_push_mapping or ctx_set_opnd_mapping to copy
a stack value's type while maintaining the mapping.
*/
static temp_type_mapping_t
ctx_get_opnd_mapping(const ctx_t *ctx, insn_opnd_t opnd)
{
    temp_type_mapping_t type_mapping;
    type_mapping.type = ctx_get_opnd_type(ctx, opnd);

    if (opnd.is_self) {
        type_mapping.mapping = MAP_SELF;
        return type_mapping;
    }

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    int stack_idx = ctx->stack_size - 1 - opnd.idx;

    if (stack_idx < MAX_TEMP_TYPES) {
        type_mapping.mapping = ctx->temp_mapping[stack_idx];
    }
    else {
        // We can't know the source of this stack operand, so we assume it is
        // a stack-only temporary. type will be UNKNOWN
        RUBY_ASSERT(type_mapping.type.type == ETYPE_UNKNOWN);
        type_mapping.mapping = MAP_STACK;
    }

    return type_mapping;
}

/*
Overwrite both the type and mapping of a stack operand.
*/
static void
ctx_set_opnd_mapping(ctx_t *ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping)
{
    // self is always MAP_SELF
    RUBY_ASSERT(!opnd.is_self);

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    int stack_idx = ctx->stack_size - 1 - opnd.idx;

    // If type propagation is disabled, store no types
    if (rb_yjit_opts.no_type_prop)
        return;

    // If outside of tracked range, do nothing
    if (stack_idx >= MAX_TEMP_TYPES)
        return;

    ctx->temp_mapping[stack_idx] = type_mapping.mapping;

    // Only used when mapping == MAP_STACK
    ctx->temp_types[stack_idx] = type_mapping.type;
}

/**
Set the type of a local variable
*/
static void
ctx_set_local_type(ctx_t *ctx, size_t idx, val_type_t type)
{
    // If type propagation is disabled, store no types
    if (rb_yjit_opts.no_type_prop)
        return;

    if (idx >= MAX_LOCAL_TYPES)
        return;

    // If any values on the stack map to this local we must detach them
    for (int i = 0; i < MAX_TEMP_TYPES; i++) {
        temp_mapping_t *mapping = &ctx->temp_mapping[i];
        if (mapping->kind == TEMP_LOCAL && mapping->idx == idx) {
            ctx->temp_types[i] = ctx->local_types[mapping->idx];
            *mapping = MAP_STACK;
        }
    }

    ctx->local_types[idx] = type;
}

// Erase local variable type information
// eg: because of a call we can't track
static void
ctx_clear_local_types(ctx_t *ctx)
{
    // When clearing local types we must detach any stack mappings to those
    // locals. Even if local values may have changed, stack values will not.
    for (int i = 0; i < MAX_TEMP_TYPES; i++) {
        temp_mapping_t *mapping = &ctx->temp_mapping[i];
        if (mapping->kind == TEMP_LOCAL) {
            RUBY_ASSERT(mapping->idx < MAX_LOCAL_TYPES);
            ctx->temp_types[i] = ctx->local_types[mapping->idx];
            *mapping = MAP_STACK;
        }
        RUBY_ASSERT(mapping->kind == TEMP_STACK || mapping->kind == TEMP_SELF);
    }
    memset(&ctx->local_types, 0, sizeof(ctx->local_types));
}

/* This returns an appropriate val_type_t based on a known value */
static val_type_t
yjit_type_of_value(VALUE val)
{
    if (SPECIAL_CONST_P(val)) {
        if (FIXNUM_P(val)) {
            return TYPE_FIXNUM;
        }
        else if (NIL_P(val)) {
            return TYPE_NIL;
        }
        else if (val == Qtrue) {
            return TYPE_TRUE;
        }
        else if (val == Qfalse) {
            return TYPE_FALSE;
        }
        else if (STATIC_SYM_P(val)) {
            return TYPE_STATIC_SYMBOL;
        }
        else if (FLONUM_P(val)) {
            return TYPE_FLONUM;
        }
        else {
            RUBY_ASSERT(false);
            UNREACHABLE_RETURN(TYPE_IMM);
        }
    }
    else {
        switch (BUILTIN_TYPE(val)) {
          case T_ARRAY:
            return TYPE_ARRAY;
          case T_HASH:
            return TYPE_HASH;
          case T_STRING:
            return TYPE_STRING;
          default:
            // generic heap object
            return TYPE_HEAP;
        }
    }
}
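
// Illustrative: yjit_type_of_value(INT2FIX(1)) yields TYPE_FIXNUM,
// yjit_type_of_value(Qnil) yields TYPE_NIL, and a heap object that is not an
// array, hash, or string falls back to TYPE_HEAP.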

/* The name of a type, for debugging */
RBIMPL_ATTR_MAYBE_UNUSED()
static const char *
yjit_type_name(val_type_t type)
{
    RUBY_ASSERT(!(type.is_imm && type.is_heap));

    switch (type.type) {
      case ETYPE_UNKNOWN:
        if (type.is_imm) {
            return "unknown immediate";
        }
        else if (type.is_heap) {
            return "unknown heap";
        }
        else {
            return "unknown";
        }
      case ETYPE_NIL:
        return "nil";
      case ETYPE_TRUE:
        return "true";
      case ETYPE_FALSE:
        return "false";
      case ETYPE_FIXNUM:
        return "fixnum";
      case ETYPE_FLONUM:
        return "flonum";
      case ETYPE_ARRAY:
        return "array";
      case ETYPE_HASH:
        return "hash";
      case ETYPE_SYMBOL:
        return "symbol";
      case ETYPE_STRING:
        return "string";
    }

    UNREACHABLE_RETURN("");
}

/*
Compute a difference between two value types
Returns 0 if the two are the same
Returns > 0 if different but compatible
Returns INT_MAX if incompatible
*/
static int
type_diff(val_type_t src, val_type_t dst)
{
    RUBY_ASSERT(!src.is_heap || !src.is_imm);
    RUBY_ASSERT(!dst.is_heap || !dst.is_imm);

    // If dst assumes heap but src doesn't
    if (dst.is_heap && !src.is_heap)
        return INT_MAX;

    // If dst assumes imm but src doesn't
    if (dst.is_imm && !src.is_imm)
        return INT_MAX;

    // If dst assumes known type different from src
    if (dst.type != ETYPE_UNKNOWN && dst.type != src.type)
        return INT_MAX;

    if (dst.is_heap != src.is_heap)
        return 1;

    if (dst.is_imm != src.is_imm)
        return 1;

    if (dst.type != src.type)
        return 1;

    return 0;
}
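
// Worked example (illustrative, assuming TYPE_FIXNUM is an immediate tagged
// ETYPE_FIXNUM and TYPE_UNKNOWN carries no assumptions): type_diff(TYPE_FIXNUM,
// TYPE_UNKNOWN) == 1 since dropping precision is compatible, while
// type_diff(TYPE_UNKNOWN, TYPE_FIXNUM) == INT_MAX since the destination would
// assume more than the source guarantees.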

/**
Compute a difference score for two context objects
Returns 0 if the two contexts are the same
Returns > 0 if different but compatible
Returns INT_MAX if incompatible
*/
static int
ctx_diff(const ctx_t *src, const ctx_t *dst)
{
    // Can only lookup the first version in the chain
    if (dst->chain_depth != 0)
        return INT_MAX;

    // Blocks with depth > 0 always produce new versions
    // Sidechains cannot overlap
    if (src->chain_depth != 0)
        return INT_MAX;

    if (dst->stack_size != src->stack_size)
        return INT_MAX;

    if (dst->sp_offset != src->sp_offset)
        return INT_MAX;

    // Difference sum
    int diff = 0;

    // Check the type of self
    int self_diff = type_diff(src->self_type, dst->self_type);

    if (self_diff == INT_MAX)
        return INT_MAX;

    diff += self_diff;

    // For each local type we track
    for (size_t i = 0; i < MAX_LOCAL_TYPES; ++i)
    {
        val_type_t t_src = src->local_types[i];
        val_type_t t_dst = dst->local_types[i];
        int temp_diff = type_diff(t_src, t_dst);

        if (temp_diff == INT_MAX)
            return INT_MAX;

        diff += temp_diff;
    }

    // For each value on the temp stack
    for (size_t i = 0; i < src->stack_size; ++i)
    {
        temp_type_mapping_t m_src = ctx_get_opnd_mapping(src, OPND_STACK(i));
        temp_type_mapping_t m_dst = ctx_get_opnd_mapping(dst, OPND_STACK(i));

        if (m_dst.mapping.kind != m_src.mapping.kind) {
            if (m_dst.mapping.kind == TEMP_STACK) {
                // We can safely drop information about the source of the temp
                // stack operand.
                diff += 1;
            }
            else {
                return INT_MAX;
            }
        }
        else if (m_dst.mapping.idx != m_src.mapping.idx) {
            return INT_MAX;
        }

        int temp_diff = type_diff(m_src.type, m_dst.type);

        if (temp_diff == INT_MAX)
            return INT_MAX;

        diff += temp_diff;
    }

    return diff;
}
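
// Note: find_block_version() below uses this score to pick the closest
// compatible block version for an incoming context.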

// Get all blocks for a particular place in an iseq.
static rb_yjit_block_array_t
yjit_get_version_array(const rb_iseq_t *iseq, unsigned idx)
{
    struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);

    if (rb_darray_size(body->yjit_blocks) == 0) {
        return NULL;
    }

    RUBY_ASSERT((unsigned)rb_darray_size(body->yjit_blocks) == body->iseq_size);
    return rb_darray_get(body->yjit_blocks, idx);
}

// Count the number of block versions matching a given blockid
static size_t get_num_versions(blockid_t blockid)
{
    return rb_darray_size(yjit_get_version_array(blockid.iseq, blockid.idx));
}

// Keep track of a block version. Block should be fully constructed.
static void
add_block_version(block_t *block)
{
    const blockid_t blockid = block->blockid;
    const rb_iseq_t *iseq = blockid.iseq;
    struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);

    // Function entry blocks must have stack size 0
    RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0));

    // Ensure yjit_blocks is initialized for this iseq
    if (rb_darray_size(body->yjit_blocks) == 0) {
        // Initialize yjit_blocks to be as wide as body->iseq_encoded
        int32_t casted = (int32_t)body->iseq_size;
        if ((unsigned)casted != body->iseq_size) {
            rb_bug("iseq too large");
        }

        rb_darray_make(&body->yjit_blocks, casted);

#if YJIT_STATS
        // First block compiled for this iseq
        yjit_runtime_counters.compiled_iseq_count++;
#endif
    }

    RUBY_ASSERT(blockid.idx < rb_darray_size(body->yjit_blocks));
    rb_yjit_block_array_t *block_array_ref = rb_darray_ref(body->yjit_blocks, blockid.idx);

    // Add the new block
    rb_darray_append(block_array_ref, block);

    {
        // By writing the new block to the iseq, the iseq now
        // contains new references to Ruby objects. Run write barriers.
        cme_dependency_t *cme_dep;
        rb_darray_foreach(block->cme_dependencies, cme_dependency_idx, cme_dep) {
            RB_OBJ_WRITTEN(iseq, Qundef, cme_dep->receiver_klass);
            RB_OBJ_WRITTEN(iseq, Qundef, cme_dep->callee_cme);
        }

        // Run write barriers for all objects in generated code.
        uint32_t *offset_element;
        rb_darray_foreach(block->gc_object_offsets, offset_idx, offset_element) {
            uint32_t offset_to_value = *offset_element;
            uint8_t *value_address = cb_get_ptr(cb, offset_to_value);

            VALUE object;
            memcpy(&object, value_address, SIZEOF_VALUE);
            RB_OBJ_WRITTEN(iseq, Qundef, object);
        }
    }

#if YJIT_STATS
    yjit_runtime_counters.compiled_block_count++;
#endif
}

static ptrdiff_t
branch_code_size(const branch_t *branch)
{
    return branch->end_addr - branch->start_addr;
}

// Generate code for a branch, possibly rewriting and changing the size of it
static void
regenerate_branch(codeblock_t *cb, branch_t *branch)
{
    if (branch->start_addr < cb_get_ptr(cb, yjit_codepage_frozen_bytes)) {
        // Generating this branch would modify frozen bytes. Do nothing.
        return;
    }

    const uint32_t old_write_pos = cb->write_pos;
    const bool branch_terminates_block = branch->end_addr == branch->block->end_addr;

    RUBY_ASSERT(branch->dst_addrs[0] != NULL);

    cb_set_write_ptr(cb, branch->start_addr);
    branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
    branch->end_addr = cb_get_write_ptr(cb);

    if (branch_terminates_block) {
        // Adjust block size
        branch->block->end_addr = branch->end_addr;
    }

    // cb->write_pos is both a write cursor and a marker for the end of
    // everything written out so far. Leave cb->write_pos at the end of the
    // block before returning. This function only ever bumps or retains the end
    // of block marker since that's what the majority of callers want. When the
    // branch sits at the very end of the codeblock and it shrinks after
    // regeneration, it's up to the caller to drop bytes off the end to
    // not leave a gap and implement branch->shape.
    if (old_write_pos > cb->write_pos) {
        // We rewound cb->write_pos to generate the branch, now restore it.
        cb_set_pos(cb, old_write_pos);
    }
    else {
        // The branch sits at the end of cb and consumed some memory.
        // Keep cb->write_pos.
    }
}

// Create a new outgoing branch entry for a block
static branch_t*
make_branch_entry(block_t *block, const ctx_t *src_ctx, branchgen_fn gen_fn)
{
    RUBY_ASSERT(block != NULL);

    // Allocate and zero-initialize
    branch_t *branch = calloc(1, sizeof(branch_t));

    branch->block = block;
    (void)src_ctx; // Unused for now
    branch->gen_fn = gen_fn;
    branch->shape = SHAPE_DEFAULT;

    // Add to the list of outgoing branches for the block
    rb_darray_append(&block->outgoing, branch);

    return branch;
}

// Retrieve a basic block version for an (iseq, idx) tuple
static block_t *
find_block_version(blockid_t blockid, const ctx_t *ctx)
{
    rb_yjit_block_array_t versions = yjit_get_version_array(blockid.iseq, blockid.idx);

    // Best match found
    block_t *best_version = NULL;
    int best_diff = INT_MAX;

    // For each version matching the blockid
    rb_darray_for(versions, idx) {
        block_t *version = rb_darray_get(versions, idx);
        int diff = ctx_diff(ctx, &version->ctx);

        // Note that we always prefer the first matching
        // version because of inline-cache chains
        if (diff < best_diff) {
            best_version = version;
            best_diff = diff;
        }
    }

    // If greedy versioning is enabled
    if (rb_yjit_opts.greedy_versioning)
    {
        // If we're below the version limit, don't settle for an imperfect match
        if ((uint32_t)rb_darray_size(versions) + 1 < rb_yjit_opts.max_versions && best_diff > 0) {
            return NULL;
        }
    }

    return best_version;
}

// Produce a generic context when the block version limit is hit for a blockid
// Note that this returns a copy and does not modify the ctx argument
static ctx_t
limit_block_versions(blockid_t blockid, const ctx_t *ctx)
{
    // Guard chains implement limits separately, do nothing
    if (ctx->chain_depth > 0)
        return *ctx;

    // If this block version we're about to add will hit the version limit
    if (get_num_versions(blockid) + 1 >= rb_yjit_opts.max_versions) {
        // Produce a generic context that stores no type information,
        // but still respects the stack_size and sp_offset constraints.
        // This new context will then match all future requests.
        ctx_t generic_ctx = DEFAULT_CTX;
        generic_ctx.stack_size = ctx->stack_size;
        generic_ctx.sp_offset = ctx->sp_offset;

        // Substitute the generic context for the one requested
        return generic_ctx;
    }

    return *ctx;
}
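
// Usage sketch (illustrative, not the exact call sites): callers are expected
// to substitute the returned context before requesting code for the target,
// e.g.
//
//     ctx_t limited_ctx = limit_block_versions(blockid, ctx);
//     // ... request or generate the target block with &limited_ctx ...
//
// so that once the limit is reached, one generic version serves all contexts.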

static void yjit_free_block(block_t *block);
static void block_array_remove(rb_yjit_block_array_t block_array, block_t *block);

// Immediately compile a series of block versions at a starting point and
// return the starting block.
static block_t *
gen_block_version(blockid_t blockid, const ctx_t *start_ctx, rb_execution_context_t *ec)
{
    // Small array to keep track of all the blocks compiled per invocation. We
    // tend to have small batches since we often break up compilation with lazy
    // stubs. Compilation is successful only if the whole batch is successful.
    enum { MAX_PER_BATCH = 64 };
    block_t *batch[MAX_PER_BATCH];
    int compiled_count = 0;
    bool batch_success = true;
    block_t *block;

    // Generate code for the first block
    block = gen_single_block(blockid, start_ctx, ec);
    if (block) {
        // Track the block
        add_block_version(block);

        batch[compiled_count] = block;
        compiled_count++;
    }
    batch_success = block;

    // For each successor block to compile
    while (batch_success) {
        // If the previous block compiled doesn't have outgoing branches, stop
        if (rb_darray_size(block->outgoing) == 0) {
            break;
        }

        // Get the last outgoing branch from the previous block. Blocks can use
        // gen_direct_jump() to request a block to be placed immediately after.
        branch_t *last_branch = rb_darray_back(block->outgoing);

        // If there is no next block to compile, stop
        if (last_branch->dst_addrs[0] || last_branch->dst_addrs[1]) {
            break;
        }

        if (last_branch->targets[0].iseq == NULL) {
            rb_bug("invalid target for last branch");
        }

        // Generate code for the current block using context from the last branch.
        blockid_t requested_id = last_branch->targets[0];
        const ctx_t *requested_ctx = &last_branch->target_ctxs[0];

        batch_success = compiled_count < MAX_PER_BATCH;
        if (batch_success) {
            block = gen_single_block(requested_id, requested_ctx, ec);
            batch_success = block;
        }

        // If the batch failed, stop
        if (!batch_success) {
            break;
        }

        // Connect the last branch and the new block
        last_branch->dst_addrs[0] = block->start_addr;
        rb_darray_append(&block->incoming, last_branch);
        last_branch->blocks[0] = block;

        // This block should immediately follow the last branch
        RUBY_ASSERT(block->start_addr == last_branch->end_addr);

        // Track the block
        add_block_version(block);

        batch[compiled_count] = block;
        compiled_count++;
    }

    if (batch_success) {
        // Success. Return first block in the batch.
        RUBY_ASSERT(compiled_count > 0);
        return batch[0];
    }
    else {
        // The batch failed. Free everything in the batch
        for (int block_idx = 0; block_idx < compiled_count; block_idx++) {
            block_t *const to_free = batch[block_idx];

            // Undo add_block_version()
            rb_yjit_block_array_t versions = yjit_get_version_array(to_free->blockid.iseq, to_free->blockid.idx);
            block_array_remove(versions, to_free);

            // Deallocate
            yjit_free_block(to_free);
        }

#if YJIT_STATS
        yjit_runtime_counters.compilation_failure++;
#endif
        return NULL;
    }
}

// Generate a block version that is an entry point inserted into an iseq
static uint8_t *
gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec)
{
    // If we aren't at PC 0, don't generate code
    // See yjit_pc_guard
    if (ISEQ_BODY(iseq)->iseq_encoded != ec->cfp->pc) {
        return NULL;
    }

    // The entry context makes no assumptions about types
    blockid_t blockid = { iseq, insn_idx };

    rb_vm_barrier();
    // Write the interpreter entry prologue. Might be NULL when out of memory.
    uint8_t *code_ptr = yjit_entry_prologue(cb, iseq);

    // Try to generate code for the entry block
    block_t *block = gen_block_version(blockid, &DEFAULT_CTX, ec);

    cb_mark_all_executable(ocb);
    cb_mark_all_executable(cb);

    // If we couldn't generate any code
    if (!block || block->end_idx == insn_idx) {
        return NULL;
    }

    return code_ptr;
}

// Called by the generated code when a branch stub is executed
// Triggers compilation of branches and code patching
static uint8_t *
branch_stub_hit(branch_t *branch, const uint32_t target_idx, rb_execution_context_t *ec)
{
    uint8_t *dst_addr = NULL;

    // Stop other ractors since we are going to patch machine code.
    // This is how the GC does it.
    RB_VM_LOCK_ENTER();
    rb_vm_barrier();

    const ptrdiff_t branch_size_on_entry = branch_code_size(branch);

    RUBY_ASSERT(branch != NULL);
    RUBY_ASSERT(target_idx < 2);
    blockid_t target = branch->targets[target_idx];
    const ctx_t *target_ctx = &branch->target_ctxs[target_idx];

    // If this branch has already been patched, return the dst address
    // Note: ractors can cause the same stub to be hit multiple times
    if (branch->blocks[target_idx]) {
        dst_addr = branch->dst_addrs[target_idx];
    }
    else {
        rb_vm_barrier();

        // :stub-sp-flush:
        // Generated code does stack operations without modifying cfp->sp, while
        // cfp->sp tells the GC what values on the stack to root. Generated code
        // generally takes care of updating cfp->sp when it calls runtime routines that
        // could trigger GC, but it's inconvenient to do it before calling this function.
        // So we do it here instead.
        VALUE *const original_interp_sp = ec->cfp->sp;
        ec->cfp->sp += target_ctx->sp_offset;

        // Update the PC in the current CFP, because it
        // may be out of sync in JITted code
        ec->cfp->pc = yjit_iseq_pc_at_idx(target.iseq, target.idx);

        // Try to find an existing compiled version of this block
        block_t *p_block = find_block_version(target, target_ctx);

        // If this block hasn't yet been compiled
        if (!p_block) {
            const uint8_t branch_old_shape = branch->shape;
            bool branch_modified = false;

            // If the new block can be generated right after the branch (at cb->write_pos)
            if (cb_get_write_ptr(cb) == branch->end_addr) {
                // This branch should be terminating its block
                RUBY_ASSERT(branch->end_addr == branch->block->end_addr);

                // Change the branch shape to indicate the target block will be placed next
                branch->shape = (uint8_t)target_idx;

                // Rewrite the branch with the new, potentially more compact shape
                regenerate_branch(cb, branch);
                branch_modified = true;

                // Ensure that the branch terminates the codeblock just like
                // before entering this if block. This drops bytes off the end
                // in case we shrank the branch when regenerating.
                cb_set_write_ptr(cb, branch->end_addr);
            }

            // Compile the new block version
            p_block = gen_block_version(target, target_ctx, ec);
            if (!p_block && branch_modified) {
                // We couldn't generate a new block for the branch, but we modified the branch.
                // Restore the branch by regenerating it.
                branch->shape = branch_old_shape;
                regenerate_branch(cb, branch);
            }
        }
        if (p_block) {
            // Branch shape should reflect layout
            RUBY_ASSERT(!(branch->shape == (uint8_t)target_idx && p_block->start_addr != branch->end_addr));
            // Add this branch to the list of incoming branches for the target
            rb_darray_append(&p_block->incoming, branch);
            // Update the branch target address
            dst_addr = p_block->start_addr;
            branch->dst_addrs[target_idx] = dst_addr;
            // Mark this branch target as patched (no longer a stub)
            branch->blocks[target_idx] = p_block;
            // Rewrite the branch with the new jump target address
            regenerate_branch(cb, branch);

            // Restore interpreter sp, since the code hitting the stub expects the original.
            ec->cfp->sp = original_interp_sp;
        }
        else {
            // Failed to service the stub by generating a new block so now we
            // need to exit to the interpreter at the stubbed location. We are
            // intentionally *not* restoring original_interp_sp. At the time of
            // writing, reconstructing interpreter state only involves setting
            // cfp->sp and cfp->pc. We set both before trying to generate the
            // block. All there is left to do to exit is to pop the native
            // frame. We do that in code_for_exit_from_stub.
            dst_addr = code_for_exit_from_stub;
        }

        cb_mark_all_executable(ocb);
        cb_mark_all_executable(cb);
    }

    const ptrdiff_t new_branch_size = branch_code_size(branch);
    RUBY_ASSERT_ALWAYS(new_branch_size >= 0);
    RUBY_ASSERT_ALWAYS(new_branch_size <= branch_size_on_entry && "branch stubs should not enlarge branches");

    RB_VM_LOCK_LEAVE();

    // Return a pointer to the compiled block version
    return dst_addr;
}
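
// Note (added for clarity; not part of the original source): the outlined
// stubs emitted by get_branch_target() below call branch_stub_hit() and then
// jump to whatever address it returns. Roughly, the runtime protocol is:
//
//   uint8_t *dst = branch_stub_hit(branch, target_idx, ec);
//   // dst is either the entry of the (newly compiled or existing) target
//   // block version, or code_for_exit_from_stub when no block could be
//   // generated, in which case the native frame is popped and control
//   // returns to the interpreter.
//   // The stub then performs the equivalent of `jmp *dst` (see jmp_rm below).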

// Get a version or stub corresponding to a branch target
static uint8_t *
get_branch_target(
    blockid_t target,
    const ctx_t *ctx,
    branch_t *branch,
    uint32_t target_idx
)
{
    //fprintf(stderr, "get_branch_target, block (%p, %d)\n", target.iseq, target.idx);

    block_t *p_block = find_block_version(target, ctx);

    // If the block already exists
    if (p_block) {
        // Add an incoming branch for this version
        rb_darray_append(&p_block->incoming, branch);
        branch->blocks[target_idx] = p_block;

        // Return a pointer to the compiled code
        return p_block->start_addr;
    }

    // Do we have enough memory for a stub?
    const long MAX_CODE_SIZE = 64;
    if (ocb->write_pos + MAX_CODE_SIZE >= cb->mem_size) {
        return NULL;
    }

    // Generate an outlined stub that will call branch_stub_hit()
    uint8_t *stub_addr = cb_get_ptr(ocb, ocb->write_pos);

    // Call branch_stub_hit(branch, target_idx, ec)
    mov(ocb, C_ARG_REGS[2], REG_EC);
    mov(ocb, C_ARG_REGS[1], imm_opnd(target_idx));
    mov(ocb, C_ARG_REGS[0], const_ptr_opnd(branch));
    call_ptr(ocb, REG0, (void *)&branch_stub_hit);

    // Jump to the address returned by the branch_stub_hit call
    jmp_rm(ocb, RAX);

    RUBY_ASSERT(cb_get_ptr(ocb, ocb->write_pos) - stub_addr <= MAX_CODE_SIZE);

    return stub_addr;
}

static void
gen_branch(
    jitstate_t *jit,
    const ctx_t *src_ctx,
    blockid_t target0,
    const ctx_t *ctx0,
    blockid_t target1,
    const ctx_t *ctx1,
    branchgen_fn gen_fn
)
{
    RUBY_ASSERT(target0.iseq != NULL);

    branch_t *branch = make_branch_entry(jit->block, src_ctx, gen_fn);
    branch->targets[0] = target0;
    branch->targets[1] = target1;
    branch->target_ctxs[0] = *ctx0;
    branch->target_ctxs[1] = ctx1? *ctx1:DEFAULT_CTX;

    // Get the branch targets or stubs
    branch->dst_addrs[0] = get_branch_target(target0, ctx0, branch, 0);
    branch->dst_addrs[1] = ctx1? get_branch_target(target1, ctx1, branch, 1):NULL;

    // Call the branch generation function
    branch->start_addr = cb_get_write_ptr(cb);
    regenerate_branch(cb, branch);
}
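
// Illustrative sketch (added; not part of the original source): gen_fn is a
// branchgen_fn with the same signature as gen_jump_branch() below. A
// hypothetical two-target generator passed to gen_branch() could look like
// the following, assuming the jz_ptr()/jnz_ptr() helpers from yjit_asm:
//
//   static void
//   gen_example_jz_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
//   {
//       switch (shape) {
//         case SHAPE_NEXT0:   // target0 is laid out next; only jump on the other outcome
//           jnz_ptr(cb, target1);
//           break;
//         case SHAPE_NEXT1:   // target1 is laid out next
//           jz_ptr(cb, target0);
//           break;
//         case SHAPE_DEFAULT: // neither target is adjacent
//           jz_ptr(cb, target0);
//           jmp_ptr(cb, target1);
//           break;
//       }
//   }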

static void
gen_jump_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
{
    switch (shape) {
      case SHAPE_NEXT0:
        break;

      case SHAPE_NEXT1:
        RUBY_ASSERT(false);
        break;

      case SHAPE_DEFAULT:
        jmp_ptr(cb, target0);
        break;
    }
}

static void
gen_direct_jump(
    jitstate_t *jit,
    const ctx_t *ctx,
    blockid_t target0
)
{
    RUBY_ASSERT(target0.iseq != NULL);

    branch_t *branch = make_branch_entry(jit->block, ctx, gen_jump_branch);
    branch->targets[0] = target0;
    branch->target_ctxs[0] = *ctx;

    block_t *p_block = find_block_version(target0, ctx);

    // If the version already exists
    if (p_block) {
        rb_darray_append(&p_block->incoming, branch);

        branch->dst_addrs[0] = p_block->start_addr;
        branch->blocks[0] = p_block;
        branch->shape = SHAPE_DEFAULT;

        // Call the branch generation function
        branch->start_addr = cb_get_write_ptr(cb);
        gen_jump_branch(cb, branch->dst_addrs[0], NULL, SHAPE_DEFAULT);
        branch->end_addr = cb_get_write_ptr(cb);
    }
    else {
        // This NULL target address signals gen_block_version() to compile the
        // target block right after this one (fallthrough).
        branch->dst_addrs[0] = NULL;
        branch->shape = SHAPE_NEXT0;
        branch->start_addr = cb_get_write_ptr(cb);
        branch->end_addr = cb_get_write_ptr(cb);
    }
}
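
// Usage note (added; not part of the original source): gen_direct_jump() never
// emits a stub. Either the target version already exists and an unconditional
// jmp is written, or the branch stays empty (start_addr == end_addr) with
// SHAPE_NEXT0 and a NULL dst_addr, which tells gen_block_version() to lay the
// target block out immediately after this one.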

// Create a stub to force the code up to this point to be executed
static void
defer_compilation(
    jitstate_t *jit,
    ctx_t *cur_ctx
)
{
    //fprintf(stderr, "defer compilation at (%p, %d) depth=%d\n", block->blockid.iseq, insn_idx, cur_ctx->chain_depth);

    if (cur_ctx->chain_depth != 0) {
        rb_bug("double defer");
    }

    ctx_t next_ctx = *cur_ctx;

    if (next_ctx.chain_depth >= UINT8_MAX) {
        rb_bug("max block version chain depth reached");
    }

    next_ctx.chain_depth += 1;

    branch_t *branch = make_branch_entry(jit->block, cur_ctx, gen_jump_branch);

    // Get the branch targets or stubs
    branch->target_ctxs[0] = next_ctx;
    branch->targets[0] = (blockid_t){ jit->block->blockid.iseq, jit->insn_idx };
    branch->dst_addrs[0] = get_branch_target(branch->targets[0], &next_ctx, branch, 0);

    // Call the branch generation function
    codeblock_t *cb = jit->cb;
    branch->start_addr = cb_get_write_ptr(cb);
    gen_jump_branch(cb, branch->dst_addrs[0], NULL, SHAPE_DEFAULT);
    branch->end_addr = cb_get_write_ptr(cb);
}
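
// Usage sketch (added for illustration; not part of the original source). A
// codegen function that lacks runtime type information can defer the rest of
// the block and come back through a stub once execution reaches this point.
// The names gen_opt_example, jit_at_current_insn and YJIT_END_BLOCK are
// assumptions about the surrounding codegen code, not verified here:
//
//   static codegen_status_t
//   gen_opt_example(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
//   {
//       if (!jit_at_current_insn(jit)) {
//           // Not enough runtime info yet; stub out the rest of the block
//           defer_compilation(jit, ctx);
//           return YJIT_END_BLOCK;
//       }
//       // ... specialize using runtime values ...
//   }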

// Remove all references to a block then free it.
static void
yjit_free_block(block_t *block)
{
    yjit_unlink_method_lookup_dependency(block);
    yjit_block_assumptions_free(block);

    // Remove this block from the predecessor's targets
    rb_darray_for(block->incoming, incoming_idx) {
        // Branch from the predecessor to us
        branch_t *pred_branch = rb_darray_get(block->incoming, incoming_idx);

        // If this is us, nullify the target block
        for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
            if (pred_branch->blocks[succ_idx] == block) {
                pred_branch->blocks[succ_idx] = NULL;
            }
        }
    }

    // For each outgoing branch
    rb_darray_for(block->outgoing, branch_idx) {
        branch_t *out_branch = rb_darray_get(block->outgoing, branch_idx);

        // For each successor block
        for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
            block_t *succ = out_branch->blocks[succ_idx];

            if (succ == NULL)
                continue;

            // Remove this block from the successor's incoming list
            rb_darray_for(succ->incoming, incoming_idx) {
                branch_t *pred_branch = rb_darray_get(succ->incoming, incoming_idx);

                if (pred_branch == out_branch) {
                    rb_darray_remove_unordered(succ->incoming, incoming_idx);
                    break;
                }
            }
        }

        // Free the outgoing branch entry
        free(out_branch);
    }

    rb_darray_free(block->incoming);
    rb_darray_free(block->outgoing);
    rb_darray_free(block->gc_object_offsets);

    free(block);
}
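
// Invariant note (added; not part of the original source): incoming and
// outgoing edges are kept symmetric. If a branch br in B->outgoing has
// br->blocks[i] == S, then S->incoming contains br, so freeing B has to break
// the links in both directions (as done above) before free() is called.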

// Remove a block version
static void
block_array_remove(rb_yjit_block_array_t block_array, block_t *block)
{
    block_t **element;
    rb_darray_foreach(block_array, idx, element) {
        if (*element == block) {
            rb_darray_remove_unordered(block_array, idx);
            return;
        }
    }

    RUBY_ASSERT(false);
}

// Some runtime checks for integrity of a program location
static void
verify_blockid(const blockid_t blockid)
{
    const rb_iseq_t *const iseq = blockid.iseq;
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(blockid.idx < ISEQ_BODY(iseq)->iseq_size);
}

// Invalidate one specific block version
static void
invalidate_block_version(block_t *block)
{
    ASSERT_vm_locking();

    // TODO: want to assert that all other ractors are stopped here. Can't patch
    // machine code that some other thread is running.

    verify_blockid(block->blockid);

    const rb_iseq_t *iseq = block->blockid.iseq;

    //fprintf(stderr, "invalidating block (%p, %d)\n", block->blockid.iseq, block->blockid.idx);
    //fprintf(stderr, "block=%p\n", block);

    // Remove this block from the version array
    rb_yjit_block_array_t versions = yjit_get_version_array(iseq, block->blockid.idx);
    block_array_remove(versions, block);

    // Get a pointer to the generated code for this block
    uint8_t *code_ptr = block->start_addr;

    // Make the start of the block do an exit. This handles OOM situations
    // and some cases where we can't efficiently patch incoming branches.
    // Do this first, since in case there is a fallthrough branch into this
    // block, the patching loop below can overwrite the start of the block.
    // In those situations, there are hopefully no jumps to the start of the block
    // after patching, as the start of the block would be in the middle of something
    // generated by branch_t::gen_fn.
    {
        RUBY_ASSERT_ALWAYS(block->entry_exit && "block invalidation requires an exit");
        if (block->entry_exit == block->start_addr) {
            // Some blocks exit on entry. Patching a jump to the entry at the
            // entry makes an infinite loop.
        }
        else if (block->start_addr >= cb_get_ptr(cb, yjit_codepage_frozen_bytes)) { // Don't patch frozen code region
            // Patch in a jump to block->entry_exit.
            uint32_t cur_pos = cb->write_pos;
            cb_set_write_ptr(cb, block->start_addr);
            jmp_ptr(cb, block->entry_exit);
            RUBY_ASSERT_ALWAYS(cb_get_ptr(cb, cb->write_pos) < block->end_addr && "invalidation wrote past end of block");
            cb_set_pos(cb, cur_pos);
        }
    }

    // For each incoming branch
    rb_darray_for(block->incoming, incoming_idx) {
        branch_t *branch = rb_darray_get(block->incoming, incoming_idx);
        uint32_t target_idx = (branch->dst_addrs[0] == code_ptr)? 0:1;
        RUBY_ASSERT(branch->dst_addrs[target_idx] == code_ptr);
        RUBY_ASSERT(branch->blocks[target_idx] == block);

        // Mark this target as being a stub
        branch->blocks[target_idx] = NULL;

        // Don't patch frozen code region
        if (branch->start_addr < cb_get_ptr(cb, yjit_codepage_frozen_bytes)) {
            continue;
        }

        // Create a stub for this branch target
        uint8_t *branch_target = get_branch_target(
            block->blockid,
            &block->ctx,
            branch,
            target_idx
        );

        if (!branch_target) {
            // We were unable to generate a stub (e.g. OOM). Use the block's
            // exit instead of a stub for the block. It's important that we
            // still patch the branch in this situation so stubs are unique
            // to branches. Think about what could go wrong if we run out of
            // memory in the middle of this loop.
            branch_target = block->entry_exit;
        }

        branch->dst_addrs[target_idx] = branch_target;

        // Check if the invalidated block immediately follows
        bool target_next = (block->start_addr == branch->end_addr);

        if (target_next) {
            // The new block will no longer be adjacent.
            // Note that we could be enlarging the branch and writing into the
            // start of the block being invalidated.
            branch->shape = SHAPE_DEFAULT;
        }

        // Rewrite the branch with the new jump target address
        regenerate_branch(cb, branch);

        if (target_next && branch->end_addr > block->end_addr) {
            fprintf(stderr, "branch_block_idx=%u block_idx=%u over=%td block_size=%td\n",
                    branch->block->blockid.idx,
                    block->blockid.idx,
                    branch->end_addr - block->end_addr,
                    block->end_addr - block->start_addr);
            yjit_print_iseq(branch->block->blockid.iseq);
            rb_bug("yjit invalidate rewrote branch past end of invalidated block");
        }
    }

    // Clear out the JIT func so that we can recompile later and so the
    // interpreter will run the iseq

#if JIT_ENABLED
    // Only clear the jit_func when we're invalidating the JIT entry block.
    // We only support compiling iseqs from index 0 right now. So entry
    // points will always have an instruction index of 0. We'll need to
    // change this in the future when we support optional parameters because
    // they enter the function with a non-zero PC
    if (block->blockid.idx == 0) {
        ISEQ_BODY(iseq)->jit_func = 0;
    }
#endif

    // TODO:
    // May want to recompile a new entry point (for interpreter entry blocks)
    // This isn't necessary for correctness

    // FIXME:
    // Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub.

    yjit_free_block(block);

#if YJIT_STATS
    yjit_runtime_counters.invalidation_count++;
#endif

    cb_mark_all_executable(ocb);
    cb_mark_all_executable(cb);

    // fprintf(stderr, "invalidation done\n");
}
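
// Illustrative sketch (added; not part of the original source): callers that
// detect a broken assumption are expected to hold the VM lock (see
// ASSERT_vm_locking() above) and invalidate every affected version, e.g.:
//
//   RB_VM_LOCK_ENTER();
//   rb_vm_barrier();
//   rb_darray_for(dependent_blocks, idx) {    // hypothetical block list
//       invalidate_block_version(rb_darray_get(dependent_blocks, idx));
//   }
//   RB_VM_LOCK_LEAVE();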

static void
yjit_init_core(void)
{
    gen_code_for_exit_from_stub();
}