store ec instead of thread in rb_context_t.

* cont.c (rb_context_t): introduce saved_ec instead of saved_thread.
  We only need to transfer ec data (not all of thread data).

  Introduce `thread_value` field to point to the creation thread.
  To access this field, `cont_thread_value()` is introduced.

* vm.c (rb_execution_context_mark): remove `static` and use it
  from cont.c (use this function instead of `rb_thread_mark`).

* vm_insnhelper.c (rb_vm_push_frame): accept ec instead of th.


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@59831 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
ko1 2017-09-10 19:00:08 +00:00
Родитель 2a01ac897d
Коммит b53b37758d
5 изменённых файлов: 108 добавлений и 92 удалений

153
cont.c
Просмотреть файл

@ -106,10 +106,11 @@ typedef struct rb_context_struct {
int register_stack_size;
#endif
} machine;
rb_thread_t saved_thread; /* selected properties of GET_THREAD() (see cont_save_thread) */
rb_execution_context_t saved_ec;
rb_jmpbuf_t jmpbuf;
rb_ensure_entry_t *ensure_array;
rb_ensure_list_t *ensure_list;
VALUE thread_value;
} rb_context_t;
@ -216,6 +217,12 @@ NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
if (!(th)->ec.tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)
static VALUE
/* Return the Thread object (VALUE) on which this continuation/fiber was
 * created; cont_init() stores th->self into cont->thread_value. */
cont_thread_value(const rb_context_t *cont)
{
return cont->thread_value;
}
static void
cont_mark(void *ptr)
{
@ -224,8 +231,8 @@ cont_mark(void *ptr)
RUBY_MARK_ENTER("cont");
rb_gc_mark(cont->value);
rb_thread_mark(&cont->saved_thread);
rb_gc_mark(cont->saved_thread.self);
rb_execution_context_mark(&cont->saved_ec);
rb_gc_mark(cont_thread_value(cont));
if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
@ -233,7 +240,7 @@ cont_mark(void *ptr)
cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
#else
rb_gc_mark_locations(cont->saved_vm_stack.ptr,
cont->saved_vm_stack.ptr, cont->saved_thread.stack_size);
cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
#endif
}
@ -245,7 +252,7 @@ cont_mark(void *ptr)
}
else {
/* fiber */
const rb_thread_t *th = rb_thread_ptr(cont->saved_thread.self);
const rb_thread_t *th = rb_thread_ptr(cont_thread_value(cont));
const rb_fiber_t *fib = (rb_fiber_t*)cont;
if ((th->ec.fiber != fib) && !FIBER_TERMINATED_P(fib)) {
@ -270,7 +277,7 @@ cont_free(void *ptr)
rb_context_t *cont = ptr;
RUBY_FREE_ENTER("cont");
RUBY_FREE_UNLESS_NULL(cont->saved_thread.ec.vm_stack);
RUBY_FREE_UNLESS_NULL(cont->saved_ec.vm_stack);
#if FIBER_USE_NATIVE
if (cont->type == CONTINUATION_CONTEXT) {
/* cont */
@ -329,7 +336,7 @@ cont_memsize(const void *ptr)
#ifdef CAPTURE_JUST_VALID_VM_STACK
size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
#else
size_t n = cont->saved_thread.ec.vm_stack_size;
size_t n = cont->saved_ec.vm_stack_size;
#endif
size += n * sizeof(*cont->saved_vm_stack.ptr);
}
@ -351,10 +358,10 @@ fiber_verify(const rb_fiber_t *fib)
#if VM_CHECK_MODE > 0
switch (fib->status) {
case FIBER_RESUMED:
VM_ASSERT(fib->cont.saved_thread.ec.vm_stack == NULL);
VM_ASSERT(fib->cont.saved_ec.vm_stack == NULL);
break;
case FIBER_SUSPENDED:
VM_ASSERT(fib->cont.saved_thread.ec.vm_stack != NULL);
VM_ASSERT(fib->cont.saved_ec.vm_stack != NULL);
break;
case FIBER_CREATED:
case FIBER_TERMINATED:
@ -391,8 +398,8 @@ fiber_free(void *ptr)
rb_fiber_t *fib = ptr;
RUBY_FREE_ENTER("fiber");
if (fib->cont.type != ROOT_FIBER_CONTEXT &&
fib->cont.saved_thread.ec.local_storage) {
st_free_table(fib->cont.saved_thread.ec.local_storage);
fib->cont.saved_ec.local_storage) {
st_free_table(fib->cont.saved_ec.local_storage);
}
cont_free(&fib->cont);
@ -407,8 +414,8 @@ fiber_memsize(const void *ptr)
size = sizeof(*fib);
if (fib->cont.type != ROOT_FIBER_CONTEXT &&
fib->cont.saved_thread.ec.local_storage != NULL) {
size += st_memsize(fib->cont.saved_thread.ec.local_storage);
fib->cont.saved_ec.local_storage != NULL) {
size += st_memsize(fib->cont.saved_ec.local_storage);
}
size += cont_memsize(&fib->cont);
return size;
@ -478,20 +485,20 @@ static const rb_data_type_t cont_data_type = {
static inline void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
rb_thread_t *sth = &cont->saved_thread;
rb_execution_context_t *sec = &cont->saved_ec;
VM_ASSERT(th->status == THREAD_RUNNABLE);
/* save thread context */
sth->ec = th->ec;
*sec = th->ec;
/* saved_thread->machine.stack_end should be NULL */
/* because it may happen GC afterward */
sth->ec.machine.stack_end = NULL;
sec->machine.stack_end = NULL;
#ifdef __ia64
sth->ec.machine.register_stack_start = NULL;
sth->ec.machine.register_stack_end = NULL;
sec->machine.register_stack_start = NULL;
sec->machine.register_stack_end = NULL;
#endif
}
@ -500,11 +507,10 @@ cont_init(rb_context_t *cont, rb_thread_t *th)
{
/* save thread context */
cont_save_thread(cont, th);
cont->saved_thread.self = th->self;
cont->saved_thread.ec.local_storage = NULL;
cont->saved_thread.ec.local_storage_recursive_hash = Qnil;
cont->saved_thread.ec.local_storage_recursive_hash_for_trace = Qnil;
cont->thread_value = th->self;
cont->saved_ec.local_storage = NULL;
cont->saved_ec.local_storage_recursive_hash = Qnil;
cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
}
static rb_context_t *
@ -545,7 +551,7 @@ cont_capture(volatile int *volatile stat)
cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
#endif
cont->saved_thread.ec.vm_stack = NULL;
cont->saved_ec.vm_stack = NULL;
cont_save_machine_stack(th, cont);
@ -584,16 +590,12 @@ cont_capture(volatile int *volatile stat)
static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fib)
{
rb_thread_t *sth = &fib->cont.saved_thread;
th->ec = sth->ec;
sth->ec.vm_stack = NULL;
th->ec = fib->cont.saved_ec;
fib->cont.saved_ec.vm_stack = NULL;
VM_ASSERT(th->ec.vm_stack != NULL);
VM_ASSERT(sth->status == THREAD_RUNNABLE);
}
static inline void
cont_restore_thread(rb_context_t *cont)
{
@ -602,38 +604,37 @@ cont_restore_thread(rb_context_t *cont)
/* restore thread context */
if (cont->type == CONTINUATION_CONTEXT) {
/* continuation */
rb_thread_t *sth = &cont->saved_thread;
rb_execution_context_t *sec = &cont->saved_ec;
const rb_fiber_t *fib;
fib = th->ec.fiber = sth->ec.fiber;
fib = th->ec.fiber = sec->fiber;
if (fib == NULL) fib = th->root_fiber;
if (fib && fib->cont.saved_thread.ec.vm_stack) {
th->ec.vm_stack_size = fib->cont.saved_thread.ec.vm_stack_size;
th->ec.vm_stack = fib->cont.saved_thread.ec.vm_stack;
if (fib && fib->cont.saved_ec.vm_stack) {
th->ec.vm_stack_size = fib->cont.saved_ec.vm_stack_size;
th->ec.vm_stack = fib->cont.saved_ec.vm_stack;
}
#ifdef CAPTURE_JUST_VALID_VM_STACK
MEMCPY(th->ec.vm_stack, cont->saved_vm_stack.ptr, VALUE, cont->saved_vm_stack.slen);
MEMCPY(th->ec.vm_stack + sth->ec.vm_stack_size - cont->saved_vm_stack.clen,
MEMCPY(th->ec.vm_stack + sec->vm_stack_size - cont->saved_vm_stack.clen,
cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen, VALUE, cont->saved_vm_stack.clen);
#else
MEMCPY(th->ec.vm_stack, cont->saved_vm_stack.ptr, VALUE, sth->ec.vm_stack_size);
MEMCPY(th->ec.vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
#endif
/* other members of ec */
th->ec.cfp = sth->ec.cfp;
th->ec.safe_level = sth->ec.safe_level;
th->ec.raised_flag = sth->ec.raised_flag;
th->ec.tag = sth->ec.tag;
th->ec.protect_tag = sth->ec.protect_tag;
th->ec.root_lep = sth->ec.root_lep;
th->ec.root_svar = sth->ec.root_svar;
th->ec.ensure_list = sth->ec.ensure_list;
th->ec.errinfo = sth->ec.errinfo;
th->ec.trace_arg = sth->ec.trace_arg;
th->ec.cfp = sec->cfp;
th->ec.safe_level = sec->safe_level;
th->ec.raised_flag = sec->raised_flag;
th->ec.tag = sec->tag;
th->ec.protect_tag = sec->protect_tag;
th->ec.root_lep = sec->root_lep;
th->ec.root_svar = sec->root_svar;
th->ec.ensure_list = sec->ensure_list;
th->ec.errinfo = sec->errinfo;
th->ec.trace_arg = sec->trace_arg;
VM_ASSERT(th->ec.vm_stack != NULL);
VM_ASSERT(sth->status == THREAD_RUNNABLE);
}
else {
/* fiber */
@ -713,7 +714,7 @@ fiber_machine_stack_alloc(size_t size)
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
rb_thread_t *sth = &fib->cont.saved_thread;
rb_execution_context_t *sec = &fib->cont.saved_ec;
#ifdef _WIN32
# if defined(_MSC_VER) && _MSC_VER <= 1200
@ -729,7 +730,7 @@ fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
rb_raise(rb_eFiberError, "can't create fiber");
}
}
sth->ec.machine.stack_maxsize = size;
sec->machine.stack_maxsize = size;
#else /* not WIN32 */
ucontext_t *context = &fib->context;
char *ptr;
@ -743,8 +744,8 @@ fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
fib->ss_sp = ptr;
fib->ss_size = size;
makecontext(context, rb_fiber_start, 0);
sth->ec.machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
sth->ec.machine.stack_maxsize = size - RB_PAGE_SIZE;
sec->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
sec->machine.stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
sth->machine.register_stack_maxsize = sth->machine.stack_maxsize;
@ -773,10 +774,10 @@ fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
}
/* exchange machine_stack_start between oldfib and newfib */
oldfib->cont.saved_thread.ec.machine.stack_start = th->ec.machine.stack_start;
oldfib->cont.saved_ec.machine.stack_start = th->ec.machine.stack_start;
/* oldfib->machine.stack_end should be NULL */
oldfib->cont.saved_thread.ec.machine.stack_end = NULL;
oldfib->cont.saved_ec.machine.stack_end = NULL;
/* restore thread context */
fiber_restore_thread(th, newfib);
@ -1124,14 +1125,14 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
rb_thread_t *th = GET_THREAD();
GetContPtr(contval, cont);
if (cont->saved_thread.self != th->self) {
if (cont_thread_value(cont) != th->self) {
rb_raise(rb_eRuntimeError, "continuation called across threads");
}
if (cont->saved_thread.ec.protect_tag != th->ec.protect_tag) {
if (cont->saved_ec.protect_tag != th->ec.protect_tag) {
rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
}
if (cont->saved_thread.ec.fiber) {
if (th->ec.fiber != cont->saved_thread.ec.fiber) {
if (cont->saved_ec.fiber) {
if (th->ec.fiber != cont->saved_ec.fiber) {
rb_raise(rb_eRuntimeError, "continuation called across fiber");
}
}
@ -1238,7 +1239,7 @@ fiber_t_alloc(VALUE fibval)
fib->cont.self = fibval;
fib->cont.type = FIBER_CONTEXT;
cont_init(&fib->cont, th);
fib->cont.saved_thread.ec.fiber = fib;
fib->cont.saved_ec.fiber = fib;
fib->prev = NULL;
/* fib->status == 0 == CREATED
@ -1251,7 +1252,7 @@ fiber_t_alloc(VALUE fibval)
}
rb_control_frame_t *
rb_vm_push_frame(rb_thread_t *th,
rb_vm_push_frame(rb_execution_context_t *sec,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
@ -1267,34 +1268,34 @@ fiber_init(VALUE fibval, VALUE proc)
{
rb_fiber_t *fib = fiber_t_alloc(fibval);
rb_context_t *cont = &fib->cont;
rb_thread_t *th = &cont->saved_thread;
rb_execution_context_t *sec = &cont->saved_ec;
rb_thread_t *cth = GET_THREAD();
/* initialize cont */
cont->saved_vm_stack.ptr = NULL;
th->ec.vm_stack = NULL;
th->ec.vm_stack_size = 0;
sec->vm_stack = NULL;
sec->vm_stack_size = 0;
th->ec.vm_stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
th->ec.vm_stack = ALLOC_N(VALUE, th->ec.vm_stack_size);
th->ec.cfp = (void *)(th->ec.vm_stack + th->ec.vm_stack_size);
sec->vm_stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
sec->vm_stack = ALLOC_N(VALUE, sec->vm_stack_size);
sec->cfp = (void *)(sec->vm_stack + sec->vm_stack_size);
rb_vm_push_frame(th,
rb_vm_push_frame(sec,
NULL,
VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME,
Qnil, /* self */
VM_BLOCK_HANDLER_NONE,
0, /* specval */
NULL, /* pc */
th->ec.vm_stack, /* sp */
sec->vm_stack, /* sp */
0, /* local_size */
0);
th->ec.tag = NULL;
th->ec.local_storage = NULL;
th->ec.local_storage_recursive_hash = Qnil;
th->ec.local_storage_recursive_hash_for_trace = Qnil;
sec->tag = NULL;
sec->local_storage = NULL;
sec->local_storage_recursive_hash = Qnil;
sec->local_storage_recursive_hash_for_trace = Qnil;
fib->first_proc = proc;
@ -1389,7 +1390,7 @@ fiber_current(void)
if (th->ec.fiber == NULL) {
rb_fiber_t *fib = root_fiber_alloc(th);
/* Running thread object has stack management responsibility */
fib->cont.saved_thread.ec.vm_stack = NULL;
fib->cont.saved_ec.vm_stack = NULL;
}
return th->ec.fiber;
}
@ -1511,10 +1512,10 @@ fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
return make_passing_arg(argc, argv);
}
if (cont->saved_thread.self != th->self) {
if (cont_thread_value(cont) != th->self) {
rb_raise(rb_eFiberError, "fiber called across threads");
}
else if (cont->saved_thread.ec.protect_tag != th->ec.protect_tag) {
else if (cont->saved_ec.protect_tag != th->ec.protect_tag) {
rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
}
else if (FIBER_TERMINATED_P(fib)) {
@ -1613,7 +1614,7 @@ rb_fiber_reset_root_local_storage(VALUE thval)
rb_thread_t *th = rb_thread_ptr(thval);
if (th->root_fiber && th->root_fiber != th->ec.fiber) {
th->ec.local_storage = th->root_fiber->cont.saved_thread.ec.local_storage;
th->ec.local_storage = th->root_fiber->cont.saved_ec.local_storage;
}
}

Просмотреть файл

@ -1740,7 +1740,6 @@ PUREFUNC(VALUE rb_vm_top_self(void));
void rb_thread_recycle_stack_release(VALUE *);
void rb_vm_change_state(void);
void rb_vm_inc_const_missing_count(void);
void rb_thread_mark(void *th);
const void **rb_vm_get_insns_address_table(void);
VALUE rb_sourcefilename(void);
VALUE rb_source_location(int *pline);

2
vm.c
Просмотреть файл

@ -2365,7 +2365,7 @@ rb_thread_recycle_stack_release(VALUE *stack)
void rb_fiber_mark_self(rb_fiber_t *fib);
static void
void
rb_execution_context_mark(const rb_execution_context_t *ec)
{
/* mark VM stack */

Просмотреть файл

@ -1617,6 +1617,7 @@ void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th);
void rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo);
void rb_execution_context_mark(const rb_execution_context_t *ec);
#define RUBY_VM_CHECK_INTS(th) ruby_vm_check_ints(th)
static inline void

Просмотреть файл

@ -197,18 +197,18 @@ vm_check_frame(VALUE type,
#endif /* VM_CHECK_MODE > 0 */
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t *th,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
VALUE specval,
VALUE cref_or_me,
const VALUE *pc,
VALUE *sp,
int local_size,
int stack_max)
vm_push_frame_(rb_execution_context_t *ec,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
VALUE specval,
VALUE cref_or_me,
const VALUE *pc,
VALUE *sp,
int local_size,
int stack_max)
{
rb_control_frame_t *const cfp = th->ec.cfp - 1;
rb_control_frame_t *const cfp = ec->cfp - 1;
int i;
vm_check_frame(type, specval, cref_or_me, iseq);
@ -217,7 +217,7 @@ vm_push_frame(rb_thread_t *th,
/* check stack overflow */
CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
th->ec.cfp = cfp;
ec->cfp = cfp;
/* setup new frame */
cfp->pc = (VALUE *)pc;
@ -254,8 +254,23 @@ vm_push_frame(rb_thread_t *th,
return cfp;
}
static rb_control_frame_t *
vm_push_frame(rb_thread_t *th,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
VALUE specval,
VALUE cref_or_me,
const VALUE *pc,
VALUE *sp,
int local_size,
int stack_max)
{
return vm_push_frame_(&th->ec, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
}
rb_control_frame_t *
rb_vm_push_frame(rb_thread_t *th,
rb_vm_push_frame(rb_execution_context_t *ec,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
@ -266,7 +281,7 @@ rb_vm_push_frame(rb_thread_t *th,
int local_size,
int stack_max)
{
return vm_push_frame(th, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
return vm_push_frame_(ec, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
}
/* return TRUE if the frame is finished */