зеркало из https://github.com/github/ruby.git
rb_execution_context_t: move stack, stack_size and cfp from rb_thread_t
The goal is to reduce rb_context_t and rb_fiber_t size by removing the need to store the entire rb_thread_t in there. [ruby-core:81045] Work-in-progress: soon, we will move more fields here. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@58614 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
Родитель
62b885b090
Коммит
9d09240d9e
4
.gdbinit
4
.gdbinit
|
@ -1094,8 +1094,8 @@ define rb_ps_thread
|
|||
set $ps_thread_th = (rb_thread_t*)$ps_thread->data
|
||||
printf "* #<Thread:%p rb_thread_t:%p native_thread:%p>\n", \
|
||||
$ps_thread, $ps_thread_th, $ps_thread_th->thread_id
|
||||
set $cfp = $ps_thread_th->cfp
|
||||
set $cfpend = (rb_control_frame_t *)($ps_thread_th->stack + $ps_thread_th->stack_size)-1
|
||||
set $cfp = $ps_thread_th->ec.cfp
|
||||
set $cfpend = (rb_control_frame_t *)($ps_thread_th->ec.stack + $ps_thread_th->ec.stack_size)-1
|
||||
while $cfp < $cfpend
|
||||
if $cfp->iseq
|
||||
if $cfp->pc
|
||||
|
|
|
@ -7116,7 +7116,7 @@ caller_location(VALUE *path, VALUE *absolute_path)
|
|||
{
|
||||
const rb_thread_t *const th = GET_THREAD();
|
||||
const rb_control_frame_t *const cfp =
|
||||
rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp) {
|
||||
int line = rb_vm_get_sourceline(cfp);
|
||||
|
|
60
cont.c
60
cont.c
|
@ -87,8 +87,8 @@ typedef struct rb_context_struct {
|
|||
VALUE value;
|
||||
VALUE *vm_stack;
|
||||
#ifdef CAPTURE_JUST_VALID_VM_STACK
|
||||
size_t vm_stack_slen; /* length of stack (head of th->stack) */
|
||||
size_t vm_stack_clen; /* length of control frames (tail of th->stack) */
|
||||
size_t vm_stack_slen; /* length of stack (head of th->ec.stack) */
|
||||
size_t vm_stack_clen; /* length of control frames (tail of th->ec.stack) */
|
||||
#endif
|
||||
struct {
|
||||
VALUE *stack;
|
||||
|
@ -221,7 +221,7 @@ cont_free(void *ptr)
|
|||
rb_context_t *cont = ptr;
|
||||
|
||||
RUBY_FREE_ENTER("cont");
|
||||
RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
|
||||
RUBY_FREE_UNLESS_NULL(cont->saved_thread.ec.stack);
|
||||
#if FIBER_USE_NATIVE
|
||||
if (cont->type == CONTINUATION_CONTEXT) {
|
||||
/* cont */
|
||||
|
@ -280,7 +280,7 @@ cont_memsize(const void *ptr)
|
|||
#ifdef CAPTURE_JUST_VALID_VM_STACK
|
||||
size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
|
||||
#else
|
||||
size_t n = cont->saved_thread.stack_size;
|
||||
size_t n = cont->saved_thread.ec.stack_size;
|
||||
#endif
|
||||
size += n * sizeof(*cont->vm_stack);
|
||||
}
|
||||
|
@ -409,10 +409,8 @@ cont_save_thread(rb_context_t *cont, rb_thread_t *th)
|
|||
rb_thread_t *sth = &cont->saved_thread;
|
||||
|
||||
/* save thread context */
|
||||
sth->stack = th->stack;
|
||||
sth->stack_size = th->stack_size;
|
||||
sth->ec = th->ec;
|
||||
sth->local_storage = th->local_storage;
|
||||
sth->cfp = th->cfp;
|
||||
sth->safe_level = th->safe_level;
|
||||
sth->raised_flag = th->raised_flag;
|
||||
sth->state = th->state;
|
||||
|
@ -470,6 +468,7 @@ cont_capture(volatile int *volatile stat)
|
|||
rb_context_t *volatile cont;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
volatile VALUE contval;
|
||||
rb_execution_context_t *ec = &th->ec;
|
||||
|
||||
THREAD_MUST_BE_RUNNING(th);
|
||||
rb_vm_stack_to_heap(th);
|
||||
|
@ -477,16 +476,17 @@ cont_capture(volatile int *volatile stat)
|
|||
contval = cont->self;
|
||||
|
||||
#ifdef CAPTURE_JUST_VALID_VM_STACK
|
||||
cont->vm_stack_slen = th->cfp->sp - th->stack;
|
||||
cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
|
||||
cont->vm_stack_slen = ec->cfp->sp - ec->stack;
|
||||
cont->vm_stack_clen = ec->stack + ec->stack_size - (VALUE*)ec->cfp;
|
||||
cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
|
||||
MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
|
||||
MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
|
||||
MEMCPY(cont->vm_stack, ec->stack, VALUE, cont->vm_stack_slen);
|
||||
MEMCPY(cont->vm_stack + cont->vm_stack_slen,
|
||||
(VALUE*)ec->cfp, VALUE, cont->vm_stack_clen);
|
||||
#else
|
||||
cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
|
||||
MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
|
||||
cont->vm_stack = ALLOC_N(VALUE, ec->stack_size);
|
||||
MEMCPY(cont->vm_stack, ec->stack, VALUE, ec->stack_size);
|
||||
#endif
|
||||
cont->saved_thread.stack = NULL;
|
||||
cont->saved_thread.ec.stack = NULL;
|
||||
|
||||
cont_save_machine_stack(th, cont);
|
||||
|
||||
|
@ -535,30 +535,30 @@ cont_restore_thread(rb_context_t *cont)
|
|||
th->fiber = sth->fiber;
|
||||
fib = th->fiber ? th->fiber : th->root_fiber;
|
||||
|
||||
if (fib && fib->cont.saved_thread.stack) {
|
||||
th->stack_size = fib->cont.saved_thread.stack_size;
|
||||
th->stack = fib->cont.saved_thread.stack;
|
||||
if (fib && fib->cont.saved_thread.ec.stack) {
|
||||
th->ec.stack_size = fib->cont.saved_thread.ec.stack_size;
|
||||
th->ec.stack = fib->cont.saved_thread.ec.stack;
|
||||
}
|
||||
#ifdef CAPTURE_JUST_VALID_VM_STACK
|
||||
MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
|
||||
MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
|
||||
MEMCPY(th->ec.stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
|
||||
MEMCPY(th->ec.stack + sth->ec.stack_size - cont->vm_stack_clen,
|
||||
cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
|
||||
#else
|
||||
MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
|
||||
MEMCPY(th->ec.stack, cont->vm_stack, VALUE, sth->ec.stack_size);
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
/* fiber */
|
||||
th->stack = sth->stack;
|
||||
sth->stack = NULL;
|
||||
th->stack_size = sth->stack_size;
|
||||
th->ec.stack = sth->ec.stack;
|
||||
sth->ec.stack = NULL;
|
||||
th->ec.stack_size = sth->ec.stack_size;
|
||||
th->local_storage = sth->local_storage;
|
||||
th->local_storage_recursive_hash = sth->local_storage_recursive_hash;
|
||||
th->local_storage_recursive_hash_for_trace = sth->local_storage_recursive_hash_for_trace;
|
||||
th->fiber = (rb_fiber_t*)cont;
|
||||
}
|
||||
|
||||
th->cfp = sth->cfp;
|
||||
th->ec.cfp = sth->ec.cfp;
|
||||
th->safe_level = sth->safe_level;
|
||||
th->raised_flag = sth->raised_flag;
|
||||
th->state = sth->state;
|
||||
|
@ -1208,12 +1208,12 @@ fiber_init(VALUE fibval, VALUE proc)
|
|||
/* initialize cont */
|
||||
cont->vm_stack = 0;
|
||||
|
||||
th->stack = NULL;
|
||||
th->stack_size = 0;
|
||||
th->ec.stack = NULL;
|
||||
th->ec.stack_size = 0;
|
||||
|
||||
th->stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
|
||||
th->stack = ALLOC_N(VALUE, th->stack_size);
|
||||
th->cfp = (void *)(th->stack + th->stack_size);
|
||||
th->ec.stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
|
||||
th->ec.stack = ALLOC_N(VALUE, th->ec.stack_size);
|
||||
th->ec.cfp = (void *)(th->ec.stack + th->ec.stack_size);
|
||||
|
||||
rb_vm_push_frame(th,
|
||||
NULL,
|
||||
|
@ -1222,7 +1222,7 @@ fiber_init(VALUE fibval, VALUE proc)
|
|||
VM_BLOCK_HANDLER_NONE,
|
||||
0, /* specval */
|
||||
NULL, /* pc */
|
||||
th->stack, /* sp */
|
||||
th->ec.stack, /* sp */
|
||||
0, /* local_size */
|
||||
0);
|
||||
|
||||
|
|
3
error.c
3
error.c
|
@ -1279,7 +1279,8 @@ name_err_initialize(int argc, VALUE *argv, VALUE self)
|
|||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp =
|
||||
rb_vm_get_ruby_level_next_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp));
|
||||
rb_vm_get_ruby_level_next_cfp(th,
|
||||
RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp));
|
||||
if (cfp) iseqw = rb_iseqw_new(cfp->iseq);
|
||||
}
|
||||
rb_ivar_set(self, id_iseq, iseqw);
|
||||
|
|
24
eval.c
24
eval.c
|
@ -563,7 +563,7 @@ setup_exception(rb_thread_t *th, int tag, volatile VALUE mesg, VALUE cause)
|
|||
|
||||
if (tag != TAG_FATAL) {
|
||||
RUBY_DTRACE_HOOK(RAISE, rb_obj_classname(th->errinfo));
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_RAISE, th->cfp->self, 0, 0, 0, mesg);
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_RAISE, th->ec.cfp->self, 0, 0, 0, mesg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -740,7 +740,7 @@ void
|
|||
rb_raise_jump(VALUE mesg, VALUE cause)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
const rb_control_frame_t *cfp = th->cfp;
|
||||
const rb_control_frame_t *cfp = th->ec.cfp;
|
||||
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
|
||||
VALUE klass = me->owner;
|
||||
VALUE self = cfp->self;
|
||||
|
@ -765,7 +765,7 @@ int
|
|||
rb_block_given_p(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
if (rb_vm_frame_block_handler(th->cfp) == VM_BLOCK_HANDLER_NONE) {
|
||||
if (rb_vm_frame_block_handler(th->ec.cfp) == VM_BLOCK_HANDLER_NONE) {
|
||||
return FALSE;
|
||||
}
|
||||
else {
|
||||
|
@ -795,7 +795,7 @@ rb_rescue2(VALUE (* b_proc) (ANYARGS), VALUE data1,
|
|||
{
|
||||
int state;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *volatile cfp = th->cfp;
|
||||
rb_control_frame_t *volatile cfp = th->ec.cfp;
|
||||
volatile VALUE result = Qfalse;
|
||||
volatile VALUE e_info = th->errinfo;
|
||||
va_list args;
|
||||
|
@ -861,7 +861,7 @@ rb_protect(VALUE (* proc) (VALUE), VALUE data, int * state)
|
|||
volatile VALUE result = Qnil;
|
||||
volatile int status;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *volatile cfp = th->cfp;
|
||||
rb_control_frame_t *volatile cfp = th->ec.cfp;
|
||||
struct rb_vm_protect_tag protect_tag;
|
||||
rb_jmpbuf_t org_jmpbuf;
|
||||
|
||||
|
@ -946,21 +946,21 @@ frame_called_id(rb_control_frame_t *cfp)
|
|||
ID
|
||||
rb_frame_this_func(void)
|
||||
{
|
||||
return frame_func_id(GET_THREAD()->cfp);
|
||||
return frame_func_id(GET_THREAD()->ec.cfp);
|
||||
}
|
||||
|
||||
ID
|
||||
rb_frame_callee(void)
|
||||
{
|
||||
return frame_called_id(GET_THREAD()->cfp);
|
||||
return frame_called_id(GET_THREAD()->ec.cfp);
|
||||
}
|
||||
|
||||
static rb_control_frame_t *
|
||||
previous_frame(rb_thread_t *th)
|
||||
{
|
||||
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
|
||||
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp);
|
||||
/* check if prev_cfp can be accessible */
|
||||
if ((void *)(th->stack + th->stack_size) == (void *)(prev_cfp)) {
|
||||
if ((void *)(th->ec.stack + th->ec.stack_size) == (void *)(prev_cfp)) {
|
||||
return 0;
|
||||
}
|
||||
return prev_cfp;
|
||||
|
@ -986,7 +986,7 @@ ID
|
|||
rb_frame_last_func(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
ID mid;
|
||||
|
||||
while (!(mid = frame_func_id(cfp)) &&
|
||||
|
@ -1257,7 +1257,7 @@ rb_mod_refine(VALUE module, VALUE klass)
|
|||
id_refined_class, id_defined_at;
|
||||
VALUE refinements, activated_refinements;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
|
||||
VALUE block_handler = rb_vm_frame_block_handler(th->ec.cfp);
|
||||
|
||||
if (block_handler == VM_BLOCK_HANDLER_NONE) {
|
||||
rb_raise(rb_eArgError, "no block given");
|
||||
|
@ -1525,7 +1525,7 @@ top_using(VALUE self, VALUE module)
|
|||
static const VALUE *
|
||||
errinfo_place(rb_thread_t *th)
|
||||
{
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
rb_control_frame_t *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
|
||||
|
||||
while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
|
||||
|
|
|
@ -14,10 +14,10 @@ vm_passed_block_handler_set(rb_thread_t *th, VALUE block_handler)
|
|||
static inline void
|
||||
pass_passed_block_handler(rb_thread_t *th)
|
||||
{
|
||||
VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
|
||||
VALUE block_handler = rb_vm_frame_block_handler(th->ec.cfp);
|
||||
VM_ASSERT(vm_block_handler_verify(block_handler));
|
||||
vm_passed_block_handler_set(th, block_handler);
|
||||
VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_PASSED);
|
||||
VM_ENV_FLAGS_SET(th->ec.cfp->ep, VM_FRAME_FLAG_PASSED);
|
||||
}
|
||||
|
||||
#define PASS_PASSED_BLOCK_HANDLER_TH(th) pass_passed_block_handler(th)
|
||||
|
|
2
gc.c
2
gc.c
|
@ -1755,7 +1755,7 @@ rb_objspace_set_event_hook(const rb_event_flag_t event)
|
|||
static void
|
||||
gc_event_hook_body(rb_thread_t *th, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
|
||||
{
|
||||
EXEC_EVENT_HOOK(th, event, th->cfp->self, 0, 0, 0, data);
|
||||
EXEC_EVENT_HOOK(th, event, th->ec.cfp->self, 0, 0, 0, data);
|
||||
}
|
||||
|
||||
#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
|
||||
|
|
8
proc.c
8
proc.c
|
@ -338,7 +338,7 @@ VALUE
|
|||
rb_binding_new(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
return rb_vm_make_binding(th, th->cfp);
|
||||
return rb_vm_make_binding(th, th->ec.cfp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -652,7 +652,7 @@ proc_new(VALUE klass, int8_t is_lambda)
|
|||
{
|
||||
VALUE procval;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
VALUE block_handler;
|
||||
|
||||
if ((block_handler = rb_vm_frame_block_handler(cfp)) == VM_BLOCK_HANDLER_NONE) {
|
||||
|
@ -1002,7 +1002,7 @@ rb_block_arity(void)
|
|||
{
|
||||
int min, max;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
VALUE block_handler = rb_vm_frame_block_handler(cfp);
|
||||
struct rb_block block;
|
||||
|
||||
|
@ -1844,7 +1844,7 @@ rb_mod_define_method(int argc, VALUE *argv, VALUE mod)
|
|||
body = rb_block_lambda();
|
||||
#else
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
|
||||
VALUE block_handler = rb_vm_frame_block_handler(th->ec.cfp);
|
||||
if (block_handler == VM_BLOCK_HANDLER_NONE) rb_raise(rb_eArgError, proc_without_block);
|
||||
|
||||
switch (vm_block_handler_type(block_handler)) {
|
||||
|
|
9
thread.c
9
thread.c
|
@ -690,8 +690,8 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
|
|||
rb_check_deadlock(th->vm);
|
||||
|
||||
if (!th->root_fiber) {
|
||||
rb_thread_recycle_stack_release(th->stack);
|
||||
th->stack = 0;
|
||||
rb_thread_recycle_stack_release(th->ec.stack);
|
||||
th->ec.stack = 0;
|
||||
}
|
||||
}
|
||||
native_mutex_lock(&th->vm->thread_destruct_lock);
|
||||
|
@ -2095,7 +2095,8 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
|
|||
if (th->status == THREAD_RUNNABLE)
|
||||
th->running_time_us += TIME_QUANTUM_USEC;
|
||||
|
||||
EXEC_EVENT_HOOK(th, RUBY_INTERNAL_EVENT_SWITCH, th->cfp->self, 0, 0, 0, Qundef);
|
||||
EXEC_EVENT_HOOK(th, RUBY_INTERNAL_EVENT_SWITCH, th->ec.cfp->self,
|
||||
0, 0, 0, Qundef);
|
||||
|
||||
rb_thread_schedule_limits(limits_us);
|
||||
}
|
||||
|
@ -4996,7 +4997,7 @@ rb_check_deadlock(rb_vm_t *vm)
|
|||
static void
|
||||
update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
|
||||
{
|
||||
VALUE coverage = rb_iseq_coverage(GET_THREAD()->cfp->iseq);
|
||||
VALUE coverage = rb_iseq_coverage(GET_THREAD()->ec.cfp->iseq);
|
||||
if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
|
||||
long line = rb_sourceline() - 1;
|
||||
long count;
|
||||
|
|
158
vm.c
158
vm.c
|
@ -88,8 +88,8 @@ rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
|
|||
static int
|
||||
VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
|
||||
{
|
||||
const VALUE *start = th->stack;
|
||||
const VALUE *end = (VALUE *)th->stack + th->stack_size;
|
||||
const VALUE *start = th->ec.stack;
|
||||
const VALUE *end = (VALUE *)th->ec.stack + th->ec.stack_size;
|
||||
if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -101,8 +101,8 @@ VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
|
|||
static int
|
||||
VM_EP_IN_HEAP_P(const rb_thread_t *th, const VALUE *ep)
|
||||
{
|
||||
const VALUE *start = th->stack;
|
||||
const VALUE *end = (VALUE *)th->cfp;
|
||||
const VALUE *start = th->ec.stack;
|
||||
const VALUE *end = (VALUE *)th->ec.cfp;
|
||||
if (start <= ep && ep < end) {
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -439,7 +439,8 @@ vm_set_top_stack(rb_thread_t *th, const rb_iseq_t *iseq)
|
|||
vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, th->top_self,
|
||||
VM_BLOCK_HANDLER_NONE,
|
||||
(VALUE)vm_cref_new_toplevel(th), /* cref or me */
|
||||
iseq->body->iseq_encoded, th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max);
|
||||
iseq->body->iseq_encoded, th->ec.cfp->sp,
|
||||
iseq->body->local_table_size, iseq->body->stack_max);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -449,7 +450,8 @@ vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref
|
|||
vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
|
||||
(VALUE)cref, /* cref or me */
|
||||
iseq->body->iseq_encoded,
|
||||
th->cfp->sp, iseq->body->local_table_size, iseq->body->stack_max);
|
||||
th->ec.cfp->sp, iseq->body->local_table_size,
|
||||
iseq->body->stack_max);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -465,7 +467,7 @@ vm_set_main_stack(rb_thread_t *th, const rb_iseq_t *iseq)
|
|||
|
||||
/* save binding */
|
||||
if (iseq->body->local_table_size > 0) {
|
||||
vm_bind_update_env(bind, vm_make_env_object(th, th->cfp));
|
||||
vm_bind_update_env(bind, vm_make_env_object(th, th->ec.cfp));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -519,7 +521,7 @@ void
|
|||
rb_vm_pop_cfunc_frame(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
|
||||
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
|
||||
|
@ -531,11 +533,11 @@ void
|
|||
rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
|
||||
{
|
||||
/* check skipped frame */
|
||||
while (th->cfp != cfp) {
|
||||
while (th->ec.cfp != cfp) {
|
||||
#if VMDEBUG
|
||||
printf("skipped frame: %s\n", vm_frametype_name(th->cfp));
|
||||
printf("skipped frame: %s\n", vm_frametype_name(th->ec.cfp));
|
||||
#endif
|
||||
if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_CFUNC) {
|
||||
if (VM_FRAME_TYPE(th->ec.cfp) != VM_FRAME_MAGIC_CFUNC) {
|
||||
rb_vm_pop_frame(th);
|
||||
}
|
||||
else { /* unlikely path */
|
||||
|
@ -717,7 +719,7 @@ vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
void
|
||||
rb_vm_stack_to_heap(rb_thread_t *th)
|
||||
{
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) {
|
||||
vm_make_env_object(th, cfp);
|
||||
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
|
||||
|
@ -940,7 +942,7 @@ rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
|
|||
ALLOCV_END(idtmp);
|
||||
|
||||
vm_set_eval_stack(th, iseq, 0, base_block);
|
||||
vm_bind_update_env(bind, envval = vm_make_env_object(th, th->cfp));
|
||||
vm_bind_update_env(bind, envval = vm_make_env_object(th, th->ec.cfp));
|
||||
rb_vm_pop_frame(th);
|
||||
|
||||
env = (const rb_env_t *)envval;
|
||||
|
@ -958,7 +960,8 @@ invoke_block(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct rb
|
|||
VM_GUARDED_PREV_EP(captured->ep),
|
||||
(VALUE)cref, /* cref or method */
|
||||
iseq->body->iseq_encoded + opt_pc,
|
||||
th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size,
|
||||
th->ec.cfp->sp + arg_size,
|
||||
iseq->body->local_table_size - arg_size,
|
||||
iseq->body->stack_max);
|
||||
return vm_exec(th);
|
||||
}
|
||||
|
@ -974,7 +977,8 @@ invoke_bmethod(rb_thread_t *th, const rb_iseq_t *iseq, VALUE self, const struct
|
|||
VM_GUARDED_PREV_EP(captured->ep),
|
||||
(VALUE)me,
|
||||
iseq->body->iseq_encoded + opt_pc,
|
||||
th->cfp->sp + arg_size, iseq->body->local_table_size - arg_size,
|
||||
th->ec.cfp->sp + arg_size,
|
||||
iseq->body->local_table_size - arg_size,
|
||||
iseq->body->stack_max);
|
||||
|
||||
RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->owner, me->def->original_id);
|
||||
|
@ -993,7 +997,7 @@ invoke_iseq_block_from_c(rb_thread_t *th, const struct rb_captured_block *captur
|
|||
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
|
||||
int i, opt_pc;
|
||||
VALUE type = is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
VALUE *sp = cfp->sp;
|
||||
const rb_callable_method_entry_t *me = th->passed_bmethod_me;
|
||||
th->passed_bmethod_me = NULL;
|
||||
|
@ -1049,7 +1053,7 @@ invoke_block_from_c_splattable(rb_thread_t *th, VALUE block_handler,
|
|||
static inline VALUE
|
||||
check_block_handler(rb_thread_t *th)
|
||||
{
|
||||
VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp);
|
||||
VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec.cfp);
|
||||
VM_ASSERT(vm_block_handler_verify(block_handler));
|
||||
if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
|
||||
rb_vm_localjump_error("no block given", Qnil, 0);
|
||||
|
@ -1187,14 +1191,14 @@ static VALUE
|
|||
vm_svar_get(VALUE key)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
return vm_cfp_svar_get(th, th->cfp, key);
|
||||
return vm_cfp_svar_get(th, th->ec.cfp, key);
|
||||
}
|
||||
|
||||
static void
|
||||
vm_svar_set(VALUE key, VALUE val)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
vm_cfp_svar_set(th, th->cfp, key, val);
|
||||
vm_cfp_svar_set(th, th->ec.cfp, key, val);
|
||||
}
|
||||
|
||||
VALUE
|
||||
|
@ -1227,7 +1231,7 @@ VALUE
|
|||
rb_sourcefilename(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp) {
|
||||
return cfp->iseq->body->location.path;
|
||||
|
@ -1241,7 +1245,7 @@ const char *
|
|||
rb_sourcefile(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp) {
|
||||
return RSTRING_PTR(cfp->iseq->body->location.path);
|
||||
|
@ -1255,7 +1259,7 @@ int
|
|||
rb_sourceline(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp) {
|
||||
return rb_vm_get_sourceline(cfp);
|
||||
|
@ -1269,7 +1273,7 @@ VALUE
|
|||
rb_source_location(int *pline)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp) {
|
||||
if (pline) *pline = rb_vm_get_sourceline(cfp);
|
||||
|
@ -1293,7 +1297,7 @@ rb_cref_t *
|
|||
rb_vm_cref(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp == NULL) {
|
||||
return NULL;
|
||||
|
@ -1306,7 +1310,7 @@ rb_cref_t *
|
|||
rb_vm_cref_replace_with_duplicated_cref(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
|
||||
return cref;
|
||||
}
|
||||
|
@ -1315,7 +1319,7 @@ const rb_cref_t *
|
|||
rb_vm_cref_in_context(VALUE self, VALUE cbase)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
const rb_cref_t *cref;
|
||||
if (cfp->self != self) return NULL;
|
||||
if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
|
||||
|
@ -1340,7 +1344,7 @@ VALUE
|
|||
rb_vm_cbase(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp == 0) {
|
||||
rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
|
||||
|
@ -1443,7 +1447,7 @@ next_not_local_frame(rb_control_frame_t *cfp)
|
|||
static void
|
||||
vm_iter_break(rb_thread_t *th, VALUE val)
|
||||
{
|
||||
rb_control_frame_t *cfp = next_not_local_frame(th->cfp);
|
||||
rb_control_frame_t *cfp = next_not_local_frame(th->ec.cfp);
|
||||
const VALUE *ep = VM_CF_PREV_EP(cfp);
|
||||
const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(th, cfp, ep);
|
||||
|
||||
|
@ -1642,34 +1646,34 @@ hook_before_rewind(rb_thread_t *th, const rb_control_frame_t *cfp, int will_fini
|
|||
if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) {
|
||||
return;
|
||||
}
|
||||
switch (VM_FRAME_TYPE(th->cfp)) {
|
||||
switch (VM_FRAME_TYPE(th->ec.cfp)) {
|
||||
case VM_FRAME_MAGIC_METHOD:
|
||||
RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0);
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0, 0, frame_return_value(err));
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err));
|
||||
THROW_DATA_CONSUMED_SET(err);
|
||||
break;
|
||||
case VM_FRAME_MAGIC_BLOCK:
|
||||
case VM_FRAME_MAGIC_LAMBDA:
|
||||
if (VM_FRAME_BMETHOD_P(th->cfp)) {
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, 0, frame_return_value(err));
|
||||
if (VM_FRAME_BMETHOD_P(th->ec.cfp)) {
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err));
|
||||
|
||||
if (!will_finish_vm_exec) {
|
||||
/* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self,
|
||||
rb_vm_frame_method_entry(th->cfp)->def->original_id,
|
||||
rb_vm_frame_method_entry(th->cfp)->called_id,
|
||||
rb_vm_frame_method_entry(th->cfp)->owner,
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->ec.cfp->self,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->def->original_id,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->called_id,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->owner,
|
||||
frame_return_value(err));
|
||||
}
|
||||
THROW_DATA_CONSUMED_SET(err);
|
||||
}
|
||||
else {
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, 0, frame_return_value(err));
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->ec.cfp->self, 0, 0, 0, frame_return_value(err));
|
||||
THROW_DATA_CONSUMED_SET(err);
|
||||
}
|
||||
break;
|
||||
case VM_FRAME_MAGIC_CLASS:
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0, 0, Qnil);
|
||||
EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->ec.cfp->self, 0, 0, 0, Qnil);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1789,20 +1793,20 @@ vm_exec(rb_thread_t *th)
|
|||
cont_pc = cont_sp = 0;
|
||||
catch_iseq = NULL;
|
||||
|
||||
while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
|
||||
if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self,
|
||||
rb_vm_frame_method_entry(th->cfp)->def->original_id,
|
||||
rb_vm_frame_method_entry(th->cfp)->called_id,
|
||||
rb_vm_frame_method_entry(th->cfp)->owner, Qnil);
|
||||
while (th->ec.cfp->pc == 0 || th->ec.cfp->iseq == 0) {
|
||||
if (UNLIKELY(VM_FRAME_TYPE(th->ec.cfp) == VM_FRAME_MAGIC_CFUNC)) {
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->ec.cfp->self,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->def->original_id,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->called_id,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->owner, Qnil);
|
||||
RUBY_DTRACE_CMETHOD_RETURN_HOOK(th,
|
||||
rb_vm_frame_method_entry(th->cfp)->owner,
|
||||
rb_vm_frame_method_entry(th->cfp)->def->original_id);
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->owner,
|
||||
rb_vm_frame_method_entry(th->ec.cfp)->def->original_id);
|
||||
}
|
||||
rb_vm_pop_frame(th);
|
||||
}
|
||||
|
||||
cfp = th->cfp;
|
||||
cfp = th->ec.cfp;
|
||||
epc = cfp->pc - cfp->iseq->body->iseq_encoded;
|
||||
|
||||
escape_cfp = NULL;
|
||||
|
@ -1832,7 +1836,7 @@ vm_exec(rb_thread_t *th)
|
|||
th->errinfo = Qnil;
|
||||
result = THROW_DATA_VAL(err);
|
||||
THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
|
||||
hook_before_rewind(th, th->cfp, TRUE, state, err);
|
||||
hook_before_rewind(th, th->ec.cfp, TRUE, state, err);
|
||||
rb_vm_pop_frame(th);
|
||||
goto finish_vme;
|
||||
}
|
||||
|
@ -1844,7 +1848,7 @@ vm_exec(rb_thread_t *th)
|
|||
#if OPT_STACK_CACHING
|
||||
initial = THROW_DATA_VAL(err);
|
||||
#else
|
||||
*th->cfp->sp++ = THROW_DATA_VAL(err);
|
||||
*th->ec.cfp->sp++ = THROW_DATA_VAL(err);
|
||||
#endif
|
||||
th->errinfo = Qnil;
|
||||
goto vm_loop_start;
|
||||
|
@ -1915,7 +1919,7 @@ vm_exec(rb_thread_t *th)
|
|||
#if OPT_STACK_CACHING
|
||||
initial = THROW_DATA_VAL(err);
|
||||
#else
|
||||
*th->cfp->sp++ = THROW_DATA_VAL(err);
|
||||
*th->ec.cfp->sp++ = THROW_DATA_VAL(err);
|
||||
#endif
|
||||
}
|
||||
th->errinfo = Qnil;
|
||||
|
@ -1974,9 +1978,9 @@ vm_exec(rb_thread_t *th)
|
|||
goto vm_loop_start;
|
||||
}
|
||||
else {
|
||||
hook_before_rewind(th, th->cfp, FALSE, state, err);
|
||||
hook_before_rewind(th, th->ec.cfp, FALSE, state, err);
|
||||
|
||||
if (VM_FRAME_FINISHED_P(th->cfp)) {
|
||||
if (VM_FRAME_FINISHED_P(th->ec.cfp)) {
|
||||
rb_vm_pop_frame(th);
|
||||
th->errinfo = (VALUE)err;
|
||||
TH_TMPPOP_TAG();
|
||||
|
@ -2035,7 +2039,7 @@ rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *cal
|
|||
int
|
||||
rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, ID *called_idp, VALUE *klassp)
|
||||
{
|
||||
return rb_vm_control_frame_id_and_class(th->cfp, idp, called_idp, klassp);
|
||||
return rb_vm_control_frame_id_and_class(th->ec.cfp, idp, called_idp, klassp);
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -2047,7 +2051,7 @@ rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
|
|||
VALUE
|
||||
rb_thread_current_status(const rb_thread_t *th)
|
||||
{
|
||||
const rb_control_frame_t *cfp = th->cfp;
|
||||
const rb_control_frame_t *cfp = th->ec.cfp;
|
||||
const rb_callable_method_entry_t *me;
|
||||
VALUE str = Qnil;
|
||||
|
||||
|
@ -2073,7 +2077,7 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
|
|||
VALUE block_handler, VALUE filename)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
const rb_control_frame_t *reg_cfp = th->cfp;
|
||||
const rb_control_frame_t *reg_cfp = th->ec.cfp;
|
||||
const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
|
||||
VALUE val;
|
||||
|
||||
|
@ -2348,11 +2352,11 @@ rb_thread_mark(void *ptr)
|
|||
rb_thread_t *th = ptr;
|
||||
RUBY_MARK_ENTER("thread");
|
||||
|
||||
if (th->stack) {
|
||||
VALUE *p = th->stack;
|
||||
VALUE *sp = th->cfp->sp;
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);
|
||||
if (th->ec.stack) {
|
||||
VALUE *p = th->ec.stack;
|
||||
VALUE *sp = th->ec.cfp->sp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
rb_control_frame_t *limit_cfp = (void *)(th->ec.stack + th->ec.stack_size);
|
||||
|
||||
rb_gc_mark_values((long)(sp - p), p);
|
||||
|
||||
|
@ -2413,7 +2417,7 @@ thread_free(void *ptr)
|
|||
RUBY_FREE_ENTER("thread");
|
||||
|
||||
if (!th->root_fiber) {
|
||||
RUBY_FREE_UNLESS_NULL(th->stack);
|
||||
RUBY_FREE_UNLESS_NULL(th->ec.stack);
|
||||
}
|
||||
|
||||
if (th->locking_mutex != Qfalse) {
|
||||
|
@ -2451,7 +2455,7 @@ thread_memsize(const void *ptr)
|
|||
size_t size = sizeof(rb_thread_t);
|
||||
|
||||
if (!th->root_fiber) {
|
||||
size += th->stack_size * sizeof(VALUE);
|
||||
size += th->ec.stack_size * sizeof(VALUE);
|
||||
}
|
||||
if (th->local_storage) {
|
||||
size += st_memsize(th->local_storage);
|
||||
|
@ -2501,18 +2505,18 @@ th_init(rb_thread_t *th, VALUE self)
|
|||
/* altstack of main thread is reallocated in another place */
|
||||
th->altstack = malloc(rb_sigaltstack_size());
|
||||
#endif
|
||||
/* th->stack_size is word number.
|
||||
/* th->ec.stack_size is word number.
|
||||
* th->vm->default_params.thread_vm_stack_size is byte size.
|
||||
*/
|
||||
th->stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
|
||||
th->stack = thread_recycle_stack(th->stack_size);
|
||||
th->ec.stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
|
||||
th->ec.stack = thread_recycle_stack(th->ec.stack_size);
|
||||
|
||||
th->cfp = (void *)(th->stack + th->stack_size);
|
||||
th->ec.cfp = (void *)(th->ec.stack + th->ec.stack_size);
|
||||
|
||||
vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
|
||||
Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
|
||||
0 /* dummy cref/me */,
|
||||
0 /* dummy pc */, th->stack, 0, 0);
|
||||
0 /* dummy pc */, th->ec.stack, 0, 0);
|
||||
|
||||
th->status = THREAD_RUNNABLE;
|
||||
th->errinfo = Qnil;
|
||||
|
@ -2586,11 +2590,11 @@ vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval, int is_single
|
|||
|
||||
#define REWIND_CFP(expr) do { \
|
||||
rb_thread_t *th__ = GET_THREAD(); \
|
||||
VALUE *const curr_sp = (th__->cfp++)->sp; \
|
||||
VALUE *const saved_sp = th__->cfp->sp; \
|
||||
th__->cfp->sp = curr_sp; \
|
||||
VALUE *const curr_sp = (th__->ec.cfp++)->sp; \
|
||||
VALUE *const saved_sp = th__->ec.cfp->sp; \
|
||||
th__->ec.cfp->sp = curr_sp; \
|
||||
expr; \
|
||||
(th__->cfp--)->sp = saved_sp; \
|
||||
(th__->ec.cfp--)->sp = saved_sp; \
|
||||
} while (0)
|
||||
|
||||
static VALUE
|
||||
|
@ -3057,12 +3061,12 @@ Init_VM(void)
|
|||
rb_vm_living_threads_insert(vm, th);
|
||||
|
||||
rb_gc_register_mark_object((VALUE)iseq);
|
||||
th->cfp->iseq = iseq;
|
||||
th->cfp->pc = iseq->body->iseq_encoded;
|
||||
th->cfp->self = th->top_self;
|
||||
th->ec.cfp->iseq = iseq;
|
||||
th->ec.cfp->pc = iseq->body->iseq_encoded;
|
||||
th->ec.cfp->self = th->top_self;
|
||||
|
||||
VM_ENV_FLAGS_UNSET(th->cfp->ep, VM_FRAME_FLAG_CFRAME);
|
||||
VM_STACK_ENV_WRITE(th->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
|
||||
VM_ENV_FLAGS_UNSET(th->ec.cfp->ep, VM_FRAME_FLAG_CFRAME);
|
||||
VM_STACK_ENV_WRITE(th->ec.cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
|
||||
|
||||
/*
|
||||
* The Binding of the top level scope
|
||||
|
@ -3080,7 +3084,7 @@ void
|
|||
rb_vm_set_progname(VALUE filename)
|
||||
{
|
||||
rb_thread_t *th = GET_VM()->main_thread;
|
||||
rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
|
||||
rb_control_frame_t *cfp = (void *)(th->ec.stack + th->ec.stack_size);
|
||||
--cfp;
|
||||
RB_OBJ_WRITE(cfp->iseq, &cfp->iseq->body->location.path, filename);
|
||||
}
|
||||
|
|
11
vm_args.c
11
vm_args.c
|
@ -526,7 +526,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
|
|||
int given_argc;
|
||||
struct args_info args_body, *args;
|
||||
VALUE keyword_hash = Qnil;
|
||||
VALUE * const orig_sp = th->cfp->sp;
|
||||
VALUE * const orig_sp = th->ec.cfp->sp;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
|
@ -546,7 +546,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
|
|||
for (i=calling->argc; i<iseq->body->param.size; i++) {
|
||||
locals[i] = Qnil;
|
||||
}
|
||||
th->cfp->sp = &locals[i];
|
||||
th->ec.cfp->sp = &locals[i];
|
||||
|
||||
/* setup args */
|
||||
args = &args_body;
|
||||
|
@ -607,7 +607,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
|
|||
}
|
||||
else {
|
||||
if (arg_setup_type == arg_setup_block) {
|
||||
CHECK_VM_STACK_OVERFLOW(th->cfp, min_argc);
|
||||
CHECK_VM_STACK_OVERFLOW(th->ec.cfp, min_argc);
|
||||
given_argc = min_argc;
|
||||
args_extend(args, min_argc);
|
||||
}
|
||||
|
@ -693,7 +693,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
|
|||
}
|
||||
#endif
|
||||
|
||||
th->cfp->sp = orig_sp;
|
||||
th->ec.cfp->sp = orig_sp;
|
||||
return opt_pc;
|
||||
}
|
||||
|
||||
|
@ -705,7 +705,8 @@ raise_argument_error(rb_thread_t *th, const rb_iseq_t *iseq, const VALUE exc)
|
|||
if (iseq) {
|
||||
vm_push_frame(th, iseq, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL, Qnil /* self */,
|
||||
VM_BLOCK_HANDLER_NONE /* specval*/, Qfalse /* me or cref */,
|
||||
iseq->body->iseq_encoded, th->cfp->sp, 0, 0 /* stack_max */);
|
||||
iseq->body->iseq_encoded,
|
||||
th->ec.cfp->sp, 0, 0 /* stack_max */);
|
||||
at = rb_threadptr_backtrace_object(th);
|
||||
rb_vm_pop_frame(th);
|
||||
}
|
||||
|
|
|
@ -427,7 +427,7 @@ backtrace_each(rb_thread_t *th,
|
|||
void (*iter_cfunc)(void *arg, const rb_control_frame_t *cfp, ID mid),
|
||||
void *arg)
|
||||
{
|
||||
rb_control_frame_t *last_cfp = th->cfp;
|
||||
rb_control_frame_t *last_cfp = th->ec.cfp;
|
||||
rb_control_frame_t *start_cfp = RUBY_VM_END_CONTROL_FRAME(th);
|
||||
rb_control_frame_t *cfp;
|
||||
ptrdiff_t size, i;
|
||||
|
@ -439,7 +439,7 @@ backtrace_each(rb_thread_t *th,
|
|||
* top frame
|
||||
* ...
|
||||
* 2nd frame <- lev:0
|
||||
* current frame <- th->cfp
|
||||
* current frame <- th->ec.cfp
|
||||
*/
|
||||
|
||||
start_cfp =
|
||||
|
@ -1178,7 +1178,7 @@ rb_debug_inspector_open(rb_debug_inspector_func_t func, void *data)
|
|||
volatile VALUE MAYBE_UNUSED(result);
|
||||
|
||||
dbg_context.th = th;
|
||||
dbg_context.cfp = dbg_context.th->cfp;
|
||||
dbg_context.cfp = dbg_context.th->ec.cfp;
|
||||
dbg_context.backtrace = rb_threadptr_backtrace_location_ary(th, 0, 0);
|
||||
dbg_context.backtrace_size = RARRAY_LEN(dbg_context.backtrace);
|
||||
dbg_context.contexts = collect_caller_bindings(th);
|
||||
|
@ -1248,7 +1248,7 @@ rb_profile_frames(int start, int limit, VALUE *buff, int *lines)
|
|||
{
|
||||
int i;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp, *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
|
||||
rb_control_frame_t *cfp = th->ec.cfp, *end_cfp = RUBY_VM_END_CONTROL_FRAME(th);
|
||||
const rb_callable_method_entry_t *cme;
|
||||
|
||||
for (i=0; i<limit && cfp != end_cfp;) {
|
||||
|
|
18
vm_core.h
18
vm_core.h
|
@ -696,15 +696,19 @@ typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
|
|||
|
||||
typedef struct rb_fiber_struct rb_fiber_t;
|
||||
|
||||
typedef struct rb_thread_context_struct {
|
||||
/* execution information */
|
||||
VALUE *stack; /* must free, must mark */
|
||||
size_t stack_size; /* size in word (byte size / sizeof(VALUE)) */
|
||||
rb_control_frame_t *cfp;
|
||||
} rb_execution_context_t;
|
||||
|
||||
typedef struct rb_thread_struct {
|
||||
struct list_node vmlt_node;
|
||||
VALUE self;
|
||||
rb_vm_t *vm;
|
||||
|
||||
/* execution information */
|
||||
VALUE *stack; /* must free, must mark */
|
||||
size_t stack_size; /* size in word (byte size / sizeof(VALUE)) */
|
||||
rb_control_frame_t *cfp;
|
||||
rb_execution_context_t ec;
|
||||
int safe_level;
|
||||
int raised_flag;
|
||||
VALUE last_status; /* $? */
|
||||
|
@ -1172,7 +1176,7 @@ VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
|
|||
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
|
||||
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
|
||||
#define RUBY_VM_END_CONTROL_FRAME(th) \
|
||||
((rb_control_frame_t *)((th)->stack + (th)->stack_size))
|
||||
((rb_control_frame_t *)((th)->ec.stack + (th)->ec.stack_size))
|
||||
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
|
||||
((void *)(ecfp) > (void *)(cfp))
|
||||
#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
|
||||
|
@ -1405,7 +1409,7 @@ extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
|
|||
extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *_pc);
|
||||
extern void rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp);
|
||||
|
||||
#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
|
||||
#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->ec.cfp)
|
||||
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
|
||||
void rb_vm_bugreport(const void *);
|
||||
NORETURN(void rb_bug_context(const void *, const char *fmt, ...));
|
||||
|
@ -1608,7 +1612,7 @@ ruby_exec_event_hook_orig(rb_thread_t *const th, const rb_event_flag_t flag,
|
|||
struct rb_trace_arg_struct trace_arg;
|
||||
trace_arg.event = flag;
|
||||
trace_arg.th = th;
|
||||
trace_arg.cfp = th->cfp;
|
||||
trace_arg.cfp = th->ec.cfp;
|
||||
trace_arg.self = self;
|
||||
trace_arg.id = id;
|
||||
trace_arg.called_id = called_id;
|
||||
|
|
43
vm_dump.c
43
vm_dump.c
|
@ -22,13 +22,14 @@
|
|||
#define MAX_POSBUF 128
|
||||
|
||||
#define VM_CFP_CNT(th, cfp) \
|
||||
((rb_control_frame_t *)((th)->stack + (th)->stack_size) - (rb_control_frame_t *)(cfp))
|
||||
((rb_control_frame_t *)((th)->ec.stack + (th)->ec.stack_size) - \
|
||||
(rb_control_frame_t *)(cfp))
|
||||
|
||||
static void
|
||||
control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
|
||||
{
|
||||
ptrdiff_t pc = -1;
|
||||
ptrdiff_t ep = cfp->ep - th->stack;
|
||||
ptrdiff_t ep = cfp->ep - th->ec.stack;
|
||||
char ep_in_heap = ' ';
|
||||
char posbuf[MAX_POSBUF+1];
|
||||
int line = 0;
|
||||
|
@ -38,7 +39,7 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
|
||||
const rb_callable_method_entry_t *me;
|
||||
|
||||
if (ep < 0 || (size_t)ep > th->stack_size) {
|
||||
if (ep < 0 || (size_t)ep > th->ec.stack_size) {
|
||||
ep = (ptrdiff_t)cfp->ep;
|
||||
ep_in_heap = 'p';
|
||||
}
|
||||
|
@ -117,14 +118,14 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
}
|
||||
|
||||
fprintf(stderr, "c:%04"PRIdPTRDIFF" ",
|
||||
((rb_control_frame_t *)(th->stack + th->stack_size) - cfp));
|
||||
((rb_control_frame_t *)(th->ec.stack + th->ec.stack_size) - cfp));
|
||||
if (pc == -1) {
|
||||
fprintf(stderr, "p:---- ");
|
||||
}
|
||||
else {
|
||||
fprintf(stderr, "p:%04"PRIdPTRDIFF" ", pc);
|
||||
}
|
||||
fprintf(stderr, "s:%04"PRIdPTRDIFF" ", cfp->sp - th->stack);
|
||||
fprintf(stderr, "s:%04"PRIdPTRDIFF" ", cfp->sp - th->ec.stack);
|
||||
fprintf(stderr, ep_in_heap == ' ' ? "e:%06"PRIdPTRDIFF" " : "E:%06"PRIxPTRDIFF" ", ep % 10000);
|
||||
fprintf(stderr, "%-6s", magic);
|
||||
if (line) {
|
||||
|
@ -150,12 +151,12 @@ rb_vmdebug_stack_dump_raw(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
VALUE *p, *st, *t;
|
||||
|
||||
fprintf(stderr, "-- stack frame ------------\n");
|
||||
for (p = st = th->stack; p < sp; p++) {
|
||||
for (p = st = th->ec.stack; p < sp; p++) {
|
||||
fprintf(stderr, "%04ld (%p): %08"PRIxVALUE, (long)(p - st), p, *p);
|
||||
|
||||
t = (VALUE *)*p;
|
||||
if (th->stack <= t && t < sp) {
|
||||
fprintf(stderr, " (= %ld)", (long)((VALUE *)GC_GUARDED_PTR_REF(t) - th->stack));
|
||||
if (th->ec.stack <= t && t < sp) {
|
||||
fprintf(stderr, " (= %ld)", (long)((VALUE *)GC_GUARDED_PTR_REF(t) - th->ec.stack));
|
||||
}
|
||||
|
||||
if (p == ep)
|
||||
|
@ -167,7 +168,7 @@ rb_vmdebug_stack_dump_raw(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
|
||||
fprintf(stderr, "-- Control frame information "
|
||||
"-----------------------------------------------\n");
|
||||
while ((void *)cfp < (void *)(th->stack + th->stack_size)) {
|
||||
while ((void *)cfp < (void *)(th->ec.stack + th->ec.stack_size)) {
|
||||
control_frame_dump(th, cfp);
|
||||
cfp++;
|
||||
}
|
||||
|
@ -178,7 +179,7 @@ void
|
|||
rb_vmdebug_stack_dump_raw_current(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_vmdebug_stack_dump_raw(th, th->cfp);
|
||||
rb_vmdebug_stack_dump_raw(th, th->ec.cfp);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -219,7 +220,7 @@ rb_vmdebug_stack_dump_th(VALUE thval)
|
|||
{
|
||||
rb_thread_t *th;
|
||||
GetThreadPtr(thval, th);
|
||||
rb_vmdebug_stack_dump_raw(th, th->cfp);
|
||||
rb_vmdebug_stack_dump_raw(th, th->ec.cfp);
|
||||
}
|
||||
|
||||
#if VMDEBUG > 2
|
||||
|
@ -293,7 +294,7 @@ vm_stack_dump_each(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
break;
|
||||
}
|
||||
fprintf(stderr, " stack %2d: %8s (%"PRIdPTRDIFF")\n", i, StringValueCStr(rstr),
|
||||
(ptr - th->stack));
|
||||
(ptr - th->ec.stack));
|
||||
}
|
||||
}
|
||||
else if (VM_FRAME_FINISHED_P(cfp)) {
|
||||
|
@ -313,22 +314,22 @@ vm_stack_dump_each(rb_thread_t *th, rb_control_frame_t *cfp)
|
|||
void
|
||||
rb_vmdebug_debug_print_register(rb_thread_t *th)
|
||||
{
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
ptrdiff_t pc = -1;
|
||||
ptrdiff_t ep = cfp->ep - th->stack;
|
||||
ptrdiff_t ep = cfp->ep - th->ec.stack;
|
||||
ptrdiff_t cfpi;
|
||||
|
||||
if (VM_FRAME_RUBYFRAME_P(cfp)) {
|
||||
pc = cfp->pc - cfp->iseq->body->iseq_encoded;
|
||||
}
|
||||
|
||||
if (ep < 0 || (size_t)ep > th->stack_size) {
|
||||
if (ep < 0 || (size_t)ep > th->ec.stack_size) {
|
||||
ep = -1;
|
||||
}
|
||||
|
||||
cfpi = ((rb_control_frame_t *)(th->stack + th->stack_size)) - cfp;
|
||||
cfpi = ((rb_control_frame_t *)(th->ec.stack + th->ec.stack_size)) - cfp;
|
||||
fprintf(stderr, " [PC] %04"PRIdPTRDIFF", [SP] %04"PRIdPTRDIFF", [EP] %04"PRIdPTRDIFF", [CFP] %04"PRIdPTRDIFF"\n",
|
||||
pc, (cfp->sp - th->stack), ep, cfpi);
|
||||
pc, (cfp->sp - th->ec.stack), ep, cfpi);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -352,7 +353,7 @@ rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE
|
|||
printf(" ");
|
||||
}
|
||||
printf("| ");
|
||||
if(0)printf("[%03ld] ", (long)(cfp->sp - th->stack));
|
||||
if(0)printf("[%03ld] ", (long)(cfp->sp - th->ec.stack));
|
||||
|
||||
/* printf("%3"PRIdPTRDIFF" ", VM_CFP_CNT(th, cfp)); */
|
||||
if (pc >= 0) {
|
||||
|
@ -387,7 +388,7 @@ rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp
|
|||
|
||||
#if VMDEBUG > 2
|
||||
/* stack_dump_thobj(th); */
|
||||
vm_stack_dump_each(th, th->cfp);
|
||||
vm_stack_dump_each(th, th->ec.cfp);
|
||||
|
||||
#if OPT_STACK_CACHING
|
||||
{
|
||||
|
@ -409,7 +410,7 @@ rb_vmdebug_thread_dump_state(VALUE self)
|
|||
rb_thread_t *th;
|
||||
rb_control_frame_t *cfp;
|
||||
GetThreadPtr(self, th);
|
||||
cfp = th->cfp;
|
||||
cfp = th->ec.cfp;
|
||||
|
||||
fprintf(stderr, "Thread state dump:\n");
|
||||
fprintf(stderr, "pc : %p, sp : %p\n", (void *)cfp->pc, (void *)cfp->sp);
|
||||
|
@ -1065,6 +1066,6 @@ rb_vmdebug_stack_dump_all_threads(void)
|
|||
#else
|
||||
fprintf(stderr, "th: %p, native_id: %p\n", th, (void *)th->thread_id);
|
||||
#endif
|
||||
rb_vmdebug_stack_dump_raw(th, th->cfp);
|
||||
rb_vmdebug_stack_dump_raw(th, th->ec.cfp);
|
||||
}
|
||||
}
|
||||
|
|
46
vm_eval.c
46
vm_eval.c
|
@ -68,7 +68,7 @@ vm_call0_cfunc(rb_thread_t* th, struct rb_calling_info *calling, const struct rb
|
|||
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, cc->me->owner, ci->mid);
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, calling->recv, ci->mid, ci->mid, cc->me->owner, Qnil);
|
||||
{
|
||||
rb_control_frame_t *reg_cfp = th->cfp;
|
||||
rb_control_frame_t *reg_cfp = th->ec.cfp;
|
||||
const rb_callable_method_entry_t *me = cc->me;
|
||||
const rb_method_cfunc_t *cfunc = &me->def->body.cfunc;
|
||||
int len = cfunc->argc;
|
||||
|
@ -82,7 +82,7 @@ vm_call0_cfunc(rb_thread_t* th, struct rb_calling_info *calling, const struct rb
|
|||
VM_PROFILE_UP(C2C_CALL);
|
||||
val = (*cfunc->invoker)(cfunc->func, recv, argc, argv);
|
||||
|
||||
if (reg_cfp == th->cfp) {
|
||||
if (reg_cfp == th->ec.cfp) {
|
||||
if (UNLIKELY(th->passed_ci != ci)) {
|
||||
rb_bug("vm_call0_cfunc: passed_ci error (ci: %p, passed_ci: %p)", ci, th->passed_ci);
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ vm_call0_cfunc_with_frame(rb_thread_t* th, struct rb_calling_info *calling, cons
|
|||
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, me->def->original_id);
|
||||
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->def->original_id, mid, me->owner, Qnil);
|
||||
{
|
||||
rb_control_frame_t *reg_cfp = th->cfp;
|
||||
rb_control_frame_t *reg_cfp = th->ec.cfp;
|
||||
|
||||
vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL, recv,
|
||||
block_handler, (VALUE)me,
|
||||
|
@ -155,7 +155,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
|
|||
switch (cc->me->def->type) {
|
||||
case VM_METHOD_TYPE_ISEQ:
|
||||
{
|
||||
rb_control_frame_t *reg_cfp = th->cfp;
|
||||
rb_control_frame_t *reg_cfp = th->ec.cfp;
|
||||
int i;
|
||||
|
||||
CHECK_VM_STACK_OVERFLOW(reg_cfp, calling->argc + 1);
|
||||
|
@ -166,7 +166,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
|
|||
}
|
||||
|
||||
vm_call_iseq_setup(th, reg_cfp, calling, ci, cc);
|
||||
VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_FINISH);
|
||||
VM_ENV_FLAGS_SET(th->ec.cfp->ep, VM_FRAME_FLAG_FINISH);
|
||||
return vm_exec(th); /* CHECK_INTS in this function */
|
||||
}
|
||||
case VM_METHOD_TYPE_NOTIMPLEMENTED:
|
||||
|
@ -253,10 +253,10 @@ rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc, const VALUE *argv, c
|
|||
static inline VALUE
|
||||
vm_call_super(rb_thread_t *th, int argc, const VALUE *argv)
|
||||
{
|
||||
VALUE recv = th->cfp->self;
|
||||
VALUE recv = th->ec.cfp->self;
|
||||
VALUE klass;
|
||||
ID id;
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
|
||||
|
||||
if (VM_FRAME_RUBYFRAME_P(cfp)) {
|
||||
|
@ -289,7 +289,7 @@ rb_current_receiver(void)
|
|||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp;
|
||||
if (!th || !(cfp = th->cfp))
|
||||
if (!th || !(cfp = th->ec.cfp))
|
||||
rb_raise(rb_eRuntimeError, "no self, no life");
|
||||
return cfp->self;
|
||||
}
|
||||
|
@ -392,7 +392,7 @@ check_funcall_respond_to(rb_thread_t *th, VALUE klass, VALUE recv, ID mid)
|
|||
static int
|
||||
check_funcall_callable(rb_thread_t *th, const rb_callable_method_entry_t *me)
|
||||
{
|
||||
return rb_method_call_status(th, me, CALL_FCALL, th->cfp->self) == MISSING_NONE;
|
||||
return rb_method_call_status(th, me, CALL_FCALL, th->ec.cfp->self) == MISSING_NONE;
|
||||
}
|
||||
|
||||
static VALUE
|
||||
|
@ -620,7 +620,7 @@ static inline VALUE
|
|||
rb_call(VALUE recv, ID mid, int argc, const VALUE *argv, call_type scope)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
return rb_call0(recv, mid, argc, argv, scope, th->cfp->self);
|
||||
return rb_call0(recv, mid, argc, argv, scope, th->ec.cfp->self);
|
||||
}
|
||||
|
||||
NORETURN(static void raise_method_missing(rb_thread_t *th, int argc, const VALUE *argv,
|
||||
|
@ -885,7 +885,7 @@ rb_funcall_with_block(VALUE recv, ID mid, int argc, const VALUE *argv, VALUE pas
|
|||
static VALUE *
|
||||
current_vm_stack_arg(rb_thread_t *th, const VALUE *argv)
|
||||
{
|
||||
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
|
||||
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp);
|
||||
if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, prev_cfp)) return NULL;
|
||||
if (prev_cfp->sp + 1 != argv) return NULL;
|
||||
return prev_cfp->sp + 1;
|
||||
|
@ -904,7 +904,7 @@ send_internal(int argc, const VALUE *argv, VALUE recv, call_type scope)
|
|||
self = Qundef;
|
||||
}
|
||||
else {
|
||||
self = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp)->self;
|
||||
self = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp)->self;
|
||||
}
|
||||
|
||||
if (argc == 0) {
|
||||
|
@ -1147,7 +1147,7 @@ rb_iterate0(VALUE (* it_proc) (VALUE), VALUE data1,
|
|||
{
|
||||
int state;
|
||||
volatile VALUE retval = Qnil;
|
||||
rb_control_frame_t *const cfp = th->cfp;
|
||||
rb_control_frame_t *const cfp = th->ec.cfp;
|
||||
|
||||
TH_PUSH_TAG(th);
|
||||
state = TH_EXEC_TAG();
|
||||
|
@ -1314,7 +1314,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
|
|||
base_block = &bind->block;
|
||||
}
|
||||
else {
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
|
||||
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
|
||||
|
||||
if (cfp != 0) {
|
||||
block.as.captured = *VM_CFP_TO_CAPTURED_BLOCK(cfp);
|
||||
|
@ -1363,7 +1363,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
|
|||
|
||||
/* save new env */
|
||||
if (bind && iseq->body->local_table_size > 0) {
|
||||
vm_bind_update_env(bind, vm_make_env_object(th, th->cfp));
|
||||
vm_bind_update_env(bind, vm_make_env_object(th, th->ec.cfp));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1569,7 +1569,7 @@ static VALUE
|
|||
yield_under(VALUE under, VALUE self, int argc, const VALUE *argv)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
VALUE block_handler = VM_CF_BLOCK_HANDLER(cfp);
|
||||
VALUE new_block_handler = 0;
|
||||
const struct rb_captured_block *captured = NULL;
|
||||
|
@ -1603,7 +1603,7 @@ yield_under(VALUE under, VALUE self, int argc, const VALUE *argv)
|
|||
new_captured.self = self;
|
||||
ep = captured->ep;
|
||||
|
||||
VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
|
||||
VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->ec.cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
|
||||
}
|
||||
|
||||
cref = vm_cref_push(th, under, ep, TRUE);
|
||||
|
@ -1614,7 +1614,7 @@ VALUE
|
|||
rb_yield_refine_block(VALUE refinement, VALUE refinements)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp);
|
||||
VALUE block_handler = VM_CF_BLOCK_HANDLER(th->ec.cfp);
|
||||
|
||||
if (vm_block_handler_type(block_handler) != block_handler_type_iseq) {
|
||||
rb_bug("rb_yield_refine_block: an iseq block is required");
|
||||
|
@ -1626,7 +1626,7 @@ rb_yield_refine_block(VALUE refinement, VALUE refinements)
|
|||
const VALUE *ep = captured->ep;
|
||||
rb_cref_t *cref = vm_cref_push(th, refinement, ep, TRUE);
|
||||
CREF_REFINEMENTS_SET(cref, refinements);
|
||||
VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
|
||||
VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->ec.cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
|
||||
new_captured.self = refinement;
|
||||
return vm_yield_with_cref(th, 0, NULL, cref, FALSE);
|
||||
}
|
||||
|
@ -2028,7 +2028,7 @@ vm_catch_protect(VALUE tag, rb_block_call_func *func, VALUE data,
|
|||
{
|
||||
int state;
|
||||
VALUE val = Qnil; /* OK */
|
||||
rb_control_frame_t *volatile saved_cfp = th->cfp;
|
||||
rb_control_frame_t *volatile saved_cfp = th->ec.cfp;
|
||||
|
||||
TH_PUSH_TAG(th);
|
||||
|
||||
|
@ -2107,7 +2107,7 @@ rb_f_local_variables(void)
|
|||
struct local_var_list vars;
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp =
|
||||
vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp));
|
||||
vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(th->ec.cfp));
|
||||
unsigned int i;
|
||||
|
||||
local_var_list_init(&vars);
|
||||
|
@ -2163,7 +2163,7 @@ VALUE
|
|||
rb_f_block_given_p(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
|
||||
|
||||
if (cfp != NULL && VM_CF_BLOCK_HANDLER(cfp) != VM_BLOCK_HANDLER_NONE) {
|
||||
|
@ -2178,7 +2178,7 @@ VALUE
|
|||
rb_current_realfilepath(void)
|
||||
{
|
||||
rb_thread_t *th = GET_THREAD();
|
||||
rb_control_frame_t *cfp = th->cfp;
|
||||
rb_control_frame_t *cfp = th->ec.cfp;
|
||||
cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
|
||||
if (cfp != 0) return cfp->iseq->body->location.absolute_path;
|
||||
return Qnil;
|
||||
|
|
|
@@ -88,7 +88,7 @@ vm_exec_core(rb_thread_t *th, VALUE initial)
#undef RESTORE_REGS
#define RESTORE_REGS() \
{ \
VM_REG_CFP = th->cfp; \
VM_REG_CFP = th->ec.cfp; \
reg_pc = reg_cfp->pc; \
}

@@ -106,7 +106,7 @@ vm_exec_core(rb_thread_t *th, VALUE initial)
return (VALUE)insns_address_table;
}
#endif
reg_cfp = th->cfp;
reg_cfp = th->ec.cfp;
reg_pc = reg_cfp->pc;

#if OPT_STACK_CACHING

@@ -146,7 +146,7 @@ rb_vm_get_insns_address_table(void)
static VALUE
vm_exec_core(rb_thread_t *th, VALUE initial)
{
register rb_control_frame_t *reg_cfp = th->cfp;
register rb_control_frame_t *reg_cfp = th->ec.cfp;

while (1) {
reg_cfp = ((rb_insn_func_t) (*GET_PC()))(th, reg_cfp);

@@ -157,7 +157,7 @@ default: \

#endif

#define VM_SP_CNT(th, sp) ((sp) - (th)->stack)
#define VM_SP_CNT(th, sp) ((sp) - (th)->ec.stack)

#if OPT_CALL_THREADED_CODE
#define THROW_EXCEPTION(exc) do { \

@@ -205,7 +205,7 @@ vm_push_frame(rb_thread_t *th,
int local_size,
int stack_max)
{
rb_control_frame_t *const cfp = th->cfp - 1;
rb_control_frame_t *const cfp = th->ec.cfp - 1;
int i;

vm_check_frame(type, specval, cref_or_me, iseq);

@@ -214,7 +214,7 @@ vm_push_frame(rb_thread_t *th,
/* check stack overflow */
CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);

th->cfp = cfp;
th->ec.cfp = cfp;

/* setup new frame */
cfp->pc = (VALUE *)pc;

@@ -275,7 +275,7 @@ vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep)
if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
if (VMDEBUG == 2) SDR();

th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
th->ec.cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

return flags & VM_FRAME_FLAG_FINISH;
}

@@ -283,7 +283,7 @@ vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep)
void
rb_vm_pop_frame(rb_thread_t *th)
{
vm_pop_frame(th, th->cfp, th->cfp->ep);
vm_pop_frame(th, th->ec.cfp, th->ec.cfp->ep);
}

/* method dispatch */

@@ -718,7 +718,7 @@ vm_cref_push(rb_thread_t *th, VALUE klass, const VALUE *ep, int pushed_by_eval)
prev_cref = vm_env_cref(ep);
}
else {
rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->ec.cfp);

if (cfp) {
prev_cref = vm_env_cref(cfp->ep);

@@ -791,7 +791,7 @@ vm_get_ev_const(rb_thread_t *th, VALUE orig_klass, ID id, int is_defined)

if (orig_klass == Qnil) {
/* in current lexical scope */
const rb_cref_t *root_cref = rb_vm_get_cref(th->cfp->ep);
const rb_cref_t *root_cref = rb_vm_get_cref(th->ec.cfp->ep);
const rb_cref_t *cref;
VALUE klass = Qnil;

@@ -837,10 +837,10 @@ vm_get_ev_const(rb_thread_t *th, VALUE orig_klass, ID id, int is_defined)

/* search self */
if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
klass = vm_get_iclass(th->cfp, CREF_CLASS(root_cref));
klass = vm_get_iclass(th->ec.cfp, CREF_CLASS(root_cref));
}
else {
klass = CLASS_OF(th->cfp->self);
klass = CLASS_OF(th->ec.cfp->self);
}

if (is_defined) {

@@ -1498,7 +1498,7 @@ vm_callee_setup_arg(rb_thread_t *th, struct rb_calling_info *calling, const stru
const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
{
if (LIKELY(simple_iseq_p(iseq))) {
rb_control_frame_t *cfp = th->cfp;
rb_control_frame_t *cfp = th->ec.cfp;

CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */

@@ -1581,7 +1581,7 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_
}

vm_pop_frame(th, cfp, cfp->ep);
cfp = th->cfp;
cfp = th->ec.cfp;

sp_orig = sp = cfp->sp;

@@ -1744,8 +1744,8 @@ vm_profile_show_result(void)
#endif

#define CHECK_CFP_CONSISTENCY(func) \
(LIKELY(reg_cfp == th->cfp + 1) ? (void) 0 : \
rb_bug(func ": cfp consistency error (%p, %p)", reg_cfp, th->cfp+1))
(LIKELY(reg_cfp == th->ec.cfp + 1) ? (void) 0 : \
rb_bug(func ": cfp consistency error (%p, %p)", reg_cfp, th->ec.cfp+1))

static inline
const rb_method_cfunc_t *

@@ -1792,7 +1792,7 @@ vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb

vm_push_frame(th, NULL, VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL, recv,
block_handler, (VALUE)me,
0, th->cfp->sp, 0, 0);
0, th->ec.cfp->sp, 0, 0);

if (len >= 0) rb_check_arity(argc, len, len);

@@ -1827,7 +1827,7 @@ vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_cal
val = (*cfunc->invoker)(cfunc->func, recv, argc, argv);

/* check */
if (reg_cfp == th->cfp) { /* no frame push */
if (reg_cfp == th->ec.cfp) { /* no frame push */
if (UNLIKELY(th->passed_ci != ci)) {
rb_bug("vm_call_cfunc_latter: passed_ci error (ci: %p, passed_ci: %p)", ci, th->passed_ci);
}

@@ -1878,7 +1878,7 @@ rb_vm_call_cfunc_push_frame(rb_thread_t *th)

vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL,
calling->recv, calling->block_handler, (VALUE)me /* cref */,
0, th->cfp->sp + cc->aux.inc_sp, 0, 0);
0, th->ec.cfp->sp + cc->aux.inc_sp, 0, 0);

if (calling->call != vm_call_general) {
calling->call = vm_call_cfunc_with_frame;

@@ -2501,7 +2501,7 @@ vm_yield_with_cfunc(rb_thread_t *th,
self,
VM_GUARDED_PREV_EP(captured->ep),
(VALUE)me,
0, th->cfp->sp, 0, 0);
0, th->ec.cfp->sp, 0, 0);
val = (*ifunc->func)(arg, ifunc->data, argc, argv, blockarg);
rb_vm_pop_frame(th);

@@ -2546,7 +2546,7 @@ static int
vm_callee_setup_block_arg(rb_thread_t *th, struct rb_calling_info *calling, const struct rb_call_info *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
{
if (simple_iseq_p(iseq)) {
rb_control_frame_t *cfp = th->cfp;
rb_control_frame_t *cfp = th->ec.cfp;
VALUE arg0;

CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */

@@ -2631,7 +2631,7 @@ vm_invoke_symbol_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
{
VALUE val;
int argc;
CALLER_SETUP_ARG(th->cfp, calling, ci);
CALLER_SETUP_ARG(th->ec.cfp, calling, ci);
argc = calling->argc;
val = vm_yield_with_symbol(th, symbol, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
POPN(argc);

@@ -2645,7 +2645,7 @@ vm_invoke_ifunc_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
{
VALUE val;
int argc;
CALLER_SETUP_ARG(th->cfp, calling, ci);
CALLER_SETUP_ARG(th->ec.cfp, calling, ci);
argc = calling->argc;
val = vm_yield_with_cfunc(th, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
POPN(argc); /* TODO: should put before C/yield? */

@@ -2710,7 +2710,7 @@ static VALUE
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
{
rb_thread_t *th = GET_THREAD();
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);
struct rb_captured_block *captured;

if (cfp == 0) {

@@ -3250,7 +3250,7 @@ vm_once_dispatch(ISEQ iseq, IC ic, rb_thread_t *th)
val = is->once.value = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
/* is->once.running_thread is cleared by vm_once_clear() */
is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
rb_iseq_add_mark_object(th->cfp->iseq, val);
rb_iseq_add_mark_object(th->ec.cfp->iseq, val);
return val;
}
else if (is->once.running_thread == th) {

@@ -95,7 +95,7 @@ enum vm_regan_acttype {
#define SET_SV(x) (*GET_SP() = (x))
/* set current stack value as x */

#define GET_SP_COUNT() (VM_REG_SP - th->stack)
#define GET_SP_COUNT() (VM_REG_SP - th->ec.stack)

/* instruction sequence C struct */
#define GET_ISEQ() (GET_CFP()->iseq)

@@ -264,7 +264,7 @@ method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def,

def->body.attr.id = (ID)(VALUE)opts;

cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);

if (cfp && (line = rb_vm_get_sourceline(cfp))) {
VALUE location = rb_ary_new3(2, cfp->iseq->body->location.path, INT2FIX(line));

@@ -1088,7 +1088,7 @@ static rb_method_visibility_t
rb_scope_visibility_get(void)
{
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);

if (!vm_env_cref_by_cref(cfp->ep)) {
return METHOD_VISI_PUBLIC;

@@ -1102,7 +1102,7 @@ static int
rb_scope_module_func_check(void)
{
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->ec.cfp);

if (!vm_env_cref_by_cref(cfp->ep)) {
return FALSE;

@@ -359,7 +359,7 @@ rb_threadptr_exec_event_hooks_orig(rb_trace_arg_t *trace_arg, int pop_p)

if (state) {
if (pop_p) {
if (VM_FRAME_FINISHED_P(th->cfp)) {
if (VM_FRAME_FINISHED_P(th->ec.cfp)) {
th->tag = th->tag->prev;
}
rb_vm_pop_frame(th);
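
Every hunk above makes the same mechanical substitution: call sites that read th->cfp or th->stack now go through an execution-context member embedded in the thread struct, so the expressions become th->ec.cfp and th->ec.stack. A minimal illustrative sketch of that layout, with stand-in typedefs and field names assumed from the accesses shown in this diff rather than taken from the real CRuby headers:

/* Illustrative sketch only -- not the actual CRuby definitions. */
#include <stddef.h>

typedef unsigned long VALUE;                                /* stand-in for CRuby's VALUE */
typedef struct rb_control_frame_struct rb_control_frame_t; /* opaque here */

typedef struct sketch_execution_context {
    VALUE *stack;               /* VM value stack, accessed as th->ec.stack */
    rb_control_frame_t *cfp;    /* current control frame, accessed as th->ec.cfp */
    /* further per-execution fields (e.g. a stack size) are assumed to live here too */
} sketch_execution_context_t;

typedef struct sketch_thread {
    sketch_execution_context_t ec;  /* embedded, so th->cfp becomes th->ec.cfp */
    /* ... other per-thread fields ... */
} sketch_thread_t;

Grouping these fields behind a single member keeps each call-site rewrite purely mechanical, which is why the hunks above all follow the same one-line pattern.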