/**********************************************************************
cont.c -
$Author$
created at: Thu May 23 09:03:43 2007
Copyright (C) 2007 Koichi Sasada
**********************************************************************/
#include "ruby/ruby.h"
#include "internal.h"
#include "vm_core.h"
#include "gc.h"
#include "eval_intern.h"
#if ((defined(_WIN32) && _WIN32_WINNT >= 0x0400) || (defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT))) && !defined(__NetBSD__) && !defined(__sun) && !defined(__ia64) && !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 1
/* FIBER_USE_NATIVE enables a Fiber performance improvement using
* system-dependent methods such as make/setcontext on POSIX systems or
* the CreateFiber() API on Windows.
* This hack makes Fiber context switching faster (2x or more).
* However, it decreases the maximum number of Fibers; for example, on a
* 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
*
* Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
* in Proc. of the 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
*/
/* In our experience, NetBSD doesn't support using setcontext() and
* pthreads simultaneously. This is because pthread_self(), TLS and other
* information are derived from the stack pointer (its higher bits).
* TODO: check this constraint in configure.
*/
#elif !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 0
#endif
#if FIBER_USE_NATIVE
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#include <ucontext.h>
#endif
#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;
#endif /*FIBER_USE_NATIVE*/
#define CAPTURE_JUST_VALID_VM_STACK 1
enum context_type {
CONTINUATION_CONTEXT = 0,
FIBER_CONTEXT = 1,
ROOT_FIBER_CONTEXT = 2
};
typedef struct rb_context_struct {
enum context_type type;
VALUE self;
int argc;
VALUE value;
VALUE *vm_stack;
#ifdef CAPTURE_JUST_VALID_VM_STACK
size_t vm_stack_slen; /* length of stack (head of th->stack) */
size_t vm_stack_clen; /* length of control frames (tail of th->stack) */
#endif
VALUE *machine_stack;
VALUE *machine_stack_src;
#ifdef __ia64
VALUE *machine_register_stack;
VALUE *machine_register_stack_src;
int machine_register_stack_size;
#endif
rb_thread_t saved_thread;
rb_jmpbuf_t jmpbuf;
size_t machine_stack_size;
} rb_context_t;
enum fiber_status {
CREATED,
RUNNING,
TERMINATED
};
#if FIBER_USE_NATIVE && !defined(_WIN32)
#define MAX_MACHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
void *ptr;
size_t size;
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif
typedef struct rb_fiber_struct {
rb_context_t cont;
VALUE prev;
enum fiber_status status;
struct rb_fiber_struct *prev_fiber;
struct rb_fiber_struct *next_fiber;
/* If a fiber invokes "transfer",
* then it can no longer be resumed via "resume" after that.
* You shouldn't mix "transfer" and "resume".
*/
int transfered;
#if FIBER_USE_NATIVE
#ifdef _WIN32
void *fib_handle;
#else
ucontext_t context;
#endif
#endif
} rb_fiber_t;
static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;
#define GetContPtr(obj, ptr) \
TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))
#define GetFiberPtr(obj, ptr) do {\
TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while (0)
NOINLINE(static VALUE cont_capture(volatile int *stat));
#define THREAD_MUST_BE_RUNNING(th) do { \
if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)
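/* GC mark function for a captured context: marks the saved thread and the
* copied VM stack, plus the copied machine stack for a continuation or for
* a running, non-current fiber. */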
static void
cont_mark(void *ptr)
{
RUBY_MARK_ENTER("cont");
if (ptr) {
rb_context_t *cont = ptr;
rb_gc_mark(cont->value);
rb_thread_mark(&cont->saved_thread);
rb_gc_mark(cont->saved_thread.self);
if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
rb_gc_mark_locations(cont->vm_stack,
cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
rb_gc_mark_locations(cont->vm_stack,
cont->vm_stack + cont->saved_thread.stack_size);
#endif
}
if (cont->machine_stack) {
if (cont->type == CONTINUATION_CONTEXT) {
/* cont */
rb_gc_mark_locations(cont->machine_stack,
cont->machine_stack + cont->machine_stack_size);
}
else {
/* fiber */
rb_thread_t *th;
rb_fiber_t *fib = (rb_fiber_t*)cont;
GetThreadPtr(cont->saved_thread.self, th);
if ((th->fiber != cont->self) && fib->status == RUNNING) {
rb_gc_mark_locations(cont->machine_stack,
cont->machine_stack + cont->machine_stack_size);
}
}
}
#ifdef __ia64
if (cont->machine_register_stack) {
rb_gc_mark_locations(cont->machine_register_stack,
cont->machine_register_stack + cont->machine_register_stack_size);
}
#endif
}
RUBY_MARK_LEAVE("cont");
}
static void
cont_free(void *ptr)
{
RUBY_FREE_ENTER("cont");
if (ptr) {
rb_context_t *cont = ptr;
RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
#if FIBER_USE_NATIVE
if (cont->type == CONTINUATION_CONTEXT) {
/* cont */
RUBY_FREE_UNLESS_NULL(cont->machine_stack);
}
else {
/* fiber */
#ifdef _WIN32
if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
/* don't delete root fiber handle */
rb_fiber_t *fib = (rb_fiber_t*)cont;
if (fib->fib_handle) {
DeleteFiber(fib->fib_handle);
}
}
#else /* not WIN32 */
if (GET_THREAD()->fiber != cont->self) {
rb_fiber_t *fib = (rb_fiber_t*)cont;
if (fib->context.uc_stack.ss_sp) {
if (cont->type == ROOT_FIBER_CONTEXT) {
rb_bug("Illegal root fiber parameter");
}
munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
}
}
else {
/* It may be reached here upon finalization. */
/* TODO: examine whether this is a bug. */
/* rb_bug("cont_free: release self"); */
}
#endif
}
#else /* not FIBER_USE_NATIVE */
RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#endif
#ifdef __ia64
RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
RUBY_FREE_UNLESS_NULL(cont->vm_stack);
/* free rb_cont_t or rb_fiber_t */
ruby_xfree(ptr);
}
RUBY_FREE_LEAVE("cont");
}
static size_t
cont_memsize(const void *ptr)
{
const rb_context_t *cont = ptr;
size_t size = 0;
if (cont) {
size = sizeof(*cont);
if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
#else
size_t n = cont->saved_thread.stack_size;
#endif
size += n * sizeof(*cont->vm_stack);
}
if (cont->machine_stack) {
size += cont->machine_stack_size * sizeof(*cont->machine_stack);
}
#ifdef __ia64
if (cont->machine_register_stack) {
size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
}
#endif
}
return size;
}
static void
fiber_mark(void *ptr)
{
RUBY_MARK_ENTER("cont");
if (ptr) {
rb_fiber_t *fib = ptr;
rb_gc_mark(fib->prev);
cont_mark(&fib->cont);
}
RUBY_MARK_LEAVE("cont");
}
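/* Insert fib into the circular, doubly-linked list of fibers, right after
* the current fiber. */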
static void
fiber_link_join(rb_fiber_t *fib)
{
VALUE current_fibval = rb_fiber_current();
rb_fiber_t *current_fib;
GetFiberPtr(current_fibval, current_fib);
/* join fiber link */
fib->next_fiber = current_fib->next_fiber;
fib->prev_fiber = current_fib;
current_fib->next_fiber->prev_fiber = fib;
current_fib->next_fiber = fib;
}
static void
fiber_link_remove(rb_fiber_t *fib)
{
fib->prev_fiber->next_fiber = fib->next_fiber;
fib->next_fiber->prev_fiber = fib->prev_fiber;
}
static void
fiber_free(void *ptr)
{
RUBY_FREE_ENTER("fiber");
if (ptr) {
rb_fiber_t *fib = ptr;
if (fib->cont.type != ROOT_FIBER_CONTEXT &&
fib->cont.saved_thread.local_storage) {
st_free_table(fib->cont.saved_thread.local_storage);
}
fiber_link_remove(fib);
cont_free(&fib->cont);
}
RUBY_FREE_LEAVE("fiber");
}
static size_t
fiber_memsize(const void *ptr)
{
const rb_fiber_t *fib = ptr;
size_t size = 0;
if (ptr) {
size = sizeof(*fib);
if (fib->cont.type != ROOT_FIBER_CONTEXT) {
size += st_memsize(fib->cont.saved_thread.local_storage);
}
size += cont_memsize(&fib->cont);
}
return size;
}
VALUE
rb_obj_is_fiber(VALUE obj)
{
if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
return Qtrue;
}
else {
return Qfalse;
}
}
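/* Copy the live machine stack, from machine_stack_start to
* machine_stack_end (in whichever direction the stack grows), into
* cont->machine_stack; on IA64 also copy the register stack backing
* store. */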
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
size_t size;
SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
th->machine_register_stack_end = rb_ia64_bsp();
#endif
if (th->machine_stack_start > th->machine_stack_end) {
size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
cont->machine_stack_src = th->machine_stack_end;
}
else {
size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
cont->machine_stack_src = th->machine_stack_start;
}
if (cont->machine_stack) {
REALLOC_N(cont->machine_stack, VALUE, size);
}
else {
cont->machine_stack = ALLOC_N(VALUE, size);
}
FLUSH_REGISTER_WINDOWS;
MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);
#ifdef __ia64
rb_ia64_flushrs();
size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
cont->machine_register_stack_src = th->machine_register_stack_start;
if (cont->machine_register_stack) {
REALLOC_N(cont->machine_register_stack, VALUE, size);
}
else {
cont->machine_register_stack = ALLOC_N(VALUE, size);
}
MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif
}
static const rb_data_type_t cont_data_type = {
"continuation",
{cont_mark, cont_free, cont_memsize,},
};
static void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
/* save thread context */
cont->saved_thread = *th;
/* saved_thread->machine_stack_(start|end) should be NULL */
/* because GC may happen afterwards */
cont->saved_thread.machine_stack_start = 0;
cont->saved_thread.machine_stack_end = 0;
#ifdef __ia64
cont->saved_thread.machine_register_stack_start = 0;
cont->saved_thread.machine_register_stack_end = 0;
#endif
}
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
/* save thread context */
cont_save_thread(cont, th);
cont->saved_thread.local_storage = 0;
}
static rb_context_t *
cont_new(VALUE klass)
{
rb_context_t *cont;
volatile VALUE contval;
rb_thread_t *th = GET_THREAD();
THREAD_MUST_BE_RUNNING(th);
contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
cont->self = contval;
cont_init(cont, th);
return cont;
}
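/* Capture the current execution context: copy the valid part of the VM
* stack and the machine stack into a new rb_context_t. On the capturing
* path this sets *stat = 0 and returns the continuation object; when the
* continuation is later invoked, the saved jmpbuf returns here again with
* *stat = 1 and the passed value is returned instead. */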
static VALUE
cont_capture(volatile int *stat)
{
rb_context_t *cont;
rb_thread_t *th = GET_THREAD(), *sth;
volatile VALUE contval;
THREAD_MUST_BE_RUNNING(th);
rb_vm_stack_to_heap(th);
cont = cont_new(rb_cContinuation);
contval = cont->self;
sth = &cont->saved_thread;
#ifdef CAPTURE_JUST_VALID_VM_STACK
cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
sth->stack = 0;
cont_save_machine_stack(th, cont);
if (ruby_setjmp(cont->jmpbuf)) {
volatile VALUE value;
value = cont->value;
if (cont->argc == -1) rb_exc_raise(value);
cont->value = Qnil;
*stat = 1;
return value;
}
else {
*stat = 0;
return contval;
}
}
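/* Restore the saved thread state from cont into the running thread. For a
* continuation, the saved VM stack contents are copied back over the live
* stack; for a fiber, the saved stack pointer and size are swapped in. */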
static void
cont_restore_thread(rb_context_t *cont)
{
rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;
/* restore thread context */
if (cont->type == CONTINUATION_CONTEXT) {
/* continuation */
VALUE fib;
th->fiber = sth->fiber;
fib = th->fiber ? th->fiber : th->root_fiber;
if (fib) {
rb_fiber_t *fcont;
GetFiberPtr(fib, fcont);
th->stack_size = fcont->cont.saved_thread.stack_size;
th->stack = fcont->cont.saved_thread.stack;
}
#ifdef CAPTURE_JUST_VALID_VM_STACK
MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
}
else {
/* fiber */
th->stack = sth->stack;
th->stack_size = sth->stack_size;
th->local_storage = sth->local_storage;
th->fiber = cont->self;
}
th->cfp = sth->cfp;
th->safe_level = sth->safe_level;
th->raised_flag = sth->raised_flag;
th->state = sth->state;
th->status = sth->status;
th->tag = sth->tag;
th->protect_tag = sth->protect_tag;
th->errinfo = sth->errinfo;
th->first_proc = sth->first_proc;
th->root_lep = sth->root_lep;
th->root_svar = sth->root_svar;
}
#if FIBER_USE_NATIVE
#ifdef _WIN32
static void
fiber_set_stack_location(void)
{
rb_thread_t *th = GET_THREAD();
VALUE *ptr;
SET_MACHINE_STACK_END(&ptr);
th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
static VOID CALLBACK
fiber_entry(void *arg)
{
fiber_set_stack_location();
rb_fiber_start();
}
#else /* _WIN32 */
/*
* FreeBSD requires that the first (i.e. addr) argument of mmap(2) be
* non-NULL if MAP_STACK is passed.
* http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
*/
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif
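/* Allocate a machine stack for a fiber. A cached stack of the same size is
* reused when available; otherwise a fresh region is mmap()ed and one page
* at the far end is mprotect()ed as a guard page. */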
static char*
fiber_machine_stack_alloc(size_t size)
{
char *ptr;
if (machine_stack_cache_index > 0) {
if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
machine_stack_cache_index--;
machine_stack_cache[machine_stack_cache_index].ptr = NULL;
machine_stack_cache[machine_stack_cache_index].size = 0;
}
else {
/* TODO handle multiple machine stack size */
rb_bug("machine_stack_cache size is not canonicalized");
}
}
else {
void *page;
STACK_GROW_DIR_DETECTION;
errno = 0;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
if (ptr == MAP_FAILED) {
rb_raise(rb_eFiberError, "can't alloc machine stack to fiber errno: %d", errno);
}
/* guard page setup */
page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
rb_raise(rb_eFiberError, "mprotect failed");
}
}
return ptr;
}
#endif
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
rb_thread_t *sth = &fib->cont.saved_thread;
#ifdef _WIN32
fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
if (!fib->fib_handle) {
/* try to release unnecessary fibers & retry to create */
rb_gc();
fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
if (!fib->fib_handle) {
rb_raise(rb_eFiberError, "can't create fiber");
}
}
sth->machine_stack_maxsize = size;
#else /* not WIN32 */
ucontext_t *context = &fib->context;
char *ptr;
STACK_GROW_DIR_DETECTION;
getcontext(context);
ptr = fiber_machine_stack_alloc(size);
context->uc_link = NULL;
context->uc_stack.ss_sp = ptr;
context->uc_stack.ss_size = size;
makecontext(context, rb_fiber_start, 0);
sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}
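/* Switch the machine context from oldfib to newfib: lazily create newfib's
* machine stack on first run, restore its saved thread state, record the
* extent of oldfib's live machine stack for GC, then swap contexts via
* SwitchToFiber()/swapcontext(). */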
NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
static void
fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
{
rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
if (newfib->status != RUNNING) {
fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
}
/* restore thread context */
cont_restore_thread(&newfib->cont);
th->machine_stack_maxsize = sth->machine_stack_maxsize;
if (sth->machine_stack_end && (newfib != oldfib)) {
rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
}
/* save oldfib's machine stack */
if (oldfib->status != TERMINATED) {
STACK_GROW_DIR_DETECTION;
SET_MACHINE_STACK_END(&th->machine_stack_end);
if (STACK_DIR_UPPER(0, 1)) {
oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end;
oldfib->cont.machine_stack = th->machine_stack_end;
}
else {
oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start;
oldfib->cont.machine_stack = th->machine_stack_start;
}
}
/* exchange machine_stack_start between oldfib and newfib */
oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start;
th->machine_stack_start = sth->machine_stack_start;
/* oldfib->machine_stack_end should be NULL */
oldfib->cont.saved_thread.machine_stack_end = 0;
#ifndef _WIN32
if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
}
#endif
/* swap machine context */
#ifdef _WIN32
SwitchToFiber(newfib->fib_handle);
#else
swapcontext(&oldfib->context, &newfib->context);
#endif
}
#endif
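/* Copy the saved machine stack back over its original location and
* longjmp() into the captured jmpbuf. The caller (cont_restore_0) has
* already moved the current frame clear of the region being restored. */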
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
static void
cont_restore_1(rb_context_t *cont)
{
cont_restore_thread(cont);
/* restore machine stack */
#ifdef _M_AMD64
{
/* workaround for x64 SEH */
jmp_buf buf;
setjmp(buf);
((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
((_JUMP_BUFFER*)(&buf))->Frame;
}
#endif
if (cont->machine_stack_src) {
FLUSH_REGISTER_WINDOWS;
MEMCPY(cont->machine_stack_src, cont->machine_stack,
VALUE, cont->machine_stack_size);
}
#ifdef __ia64
if (cont->machine_register_stack_src) {
MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
VALUE, cont->machine_register_stack_size);
}
#endif
ruby_longjmp(cont->jmpbuf, 1);
}
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
#ifdef __ia64
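/* Volatile dummy variables referenced from register_stack_extend() to keep
* many values live in registers, so that the IA64 register stack (RSE
* backing store) grows past the saved region before it is restored. */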
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{/* the above lines make cc-mode.el confused so much */}
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
if (rb_dummy_false) {
/* use registers as much as possible */
E(a) = E(b) = E(c) = E(d) = E(e) =
E(f) = E(g) = E(h) = E(i) = E(j) =
E(k) = E(l) = E(m) = E(n) = E(o) =
E(p) = E(q) = E(r) = E(s) = E(t) = 0;
E(a) = E(b) = E(c) = E(d) = E(e) =
E(f) = E(g) = E(h) = E(i) = E(j) =
E(k) = E(l) = E(m) = E(n) = E(o) =
E(p) = E(q) = E(r) = E(s) = E(t) = 0;
}
if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
}
cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
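/* Move the current machine stack frame clear of the region about to be
* restored, either by alloca()ing the gap or by recursing with a padded
* frame, then jump back through cont_restore_1(). Without this, restoring
* the saved stack could overwrite the active frame. */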
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
VALUE space[STACK_PAD_SIZE];
#if !STACK_GROW_DIRECTION
if (addr_in_prev_frame > &space[0]) {
/* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
volatile VALUE *const end = cont->machine_stack_src;
if (&space[0] > end) {
# ifdef HAVE_ALLOCA
volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
space[0] = *sp;
# else
cont_restore_0(cont, &space[0]);
# endif
}
#endif
#if !STACK_GROW_DIRECTION
}
else {
/* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
space[0] = *sp;
# else
cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
}
#endif
#if !STACK_GROW_DIRECTION
}
#endif
}
cont_restore_1(cont);
}
#ifdef __ia64
#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
#endif
/*
* Document-class: Continuation
*
* Continuation objects are generated by Kernel#callcc,
* after having +require+d <i>continuation</i>. They hold
* a return address and execution context, allowing a nonlocal return
* to the end of the <code>callcc</code> block from anywhere within a
* program. Continuations are somewhat analogous to a structured
* version of C's <code>setjmp/longjmp</code> (although they contain
* more state, so you might consider them closer to threads).
*
* For instance:
*
* require "continuation"
* arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
* callcc{|cc| $cc = cc}
* puts(message = arr.shift)
* $cc.call unless message =~ /Max/
*
* <em>produces:</em>
*
* Freddie
* Herbie
* Ron
* Max
*
* This (somewhat contrived) example allows the inner loop to abandon
* processing early:
*
* require "continuation"
* callcc {|cont|
* for i in 0..4
* print "\n#{i}: "
* for j in i*5...(i+1)*5
* cont.call() if j == 17
* printf "%3d", j
* end
* end
* }
* puts
*
* <em>produces:</em>
*
* 0: 0 1 2 3 4
* 1: 5 6 7 8 9
* 2: 10 11 12 13 14
* 3: 15 16
*/
/*
* call-seq:
* callcc {|cont| block } -> obj
*
* Generates a Continuation object, which it passes to
* the associated block. You need to <code>require
* 'continuation'</code> before using this method. Performing a
* <em>cont</em><code>.call</code> will cause the #callcc
* to return (as will falling through the end of the block). The
* value returned by the #callcc is the value of the
* block, or the value passed to <em>cont</em><code>.call</code>. See
* class Continuation for more details. Also see
* Kernel#throw for an alternative mechanism for
* unwinding a call stack.
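*
* For example (a minimal sketch; the symbols are arbitrary placeholder
* values):
*
* require "continuation"
* callcc {|cont| :block_value } #=> :block_value
* callcc {|cont| cont.call(:early); :gone } #=> :early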
*/
static VALUE
rb_callcc(VALUE self)
{
volatile int called;
volatile VALUE val = cont_capture(&called);
if (called) {
return val;
}
else {
return rb_yield(val);
}
}
static VALUE
make_passing_arg(int argc, VALUE *argv)
{
switch (argc) {
case 0:
return Qnil;
case 1:
return argv[0];
default:
return rb_ary_new4(argc, argv);
}
}
/*
* call-seq:
* cont.call(args, ...)
* cont[args, ...]
*
* Invokes the continuation. The program continues from the end of the
* <code>callcc</code> block. If no arguments are given, the original
* <code>callcc</code> returns <code>nil</code>. If one argument is
* given, <code>callcc</code> returns it. Otherwise, an array
* containing <i>args</i> is returned.
*
* callcc {|cont| cont.call } #=> nil
* callcc {|cont| cont.call 1 } #=> 1
* callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
*/
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
rb_context_t *cont;
rb_thread_t *th = GET_THREAD();
GetContPtr(contval, cont);
if (cont->saved_thread.self != th->self) {
rb_raise(rb_eRuntimeError, "continuation called across threads");
}
if (cont->saved_thread.protect_tag != th->protect_tag) {
rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
}
if (cont->saved_thread.fiber) {
rb_fiber_t *fcont;
GetFiberPtr(cont->saved_thread.fiber, fcont);
if (th->fiber != cont->saved_thread.fiber) {
rb_raise(rb_eRuntimeError, "continuation called across fiber");
}
}
cont->argc = argc;
cont->value = make_passing_arg(argc, argv);
/* restore `tracing' context. see [Feature #4347] */
th->trace_arg = cont->saved_thread.trace_arg;
cont_restore_0(cont, &contval);
return Qnil; /* unreachable */
}
/*********/
/* fiber */
/*********/
/*
* Document-class: Fiber
*
* Fibers are primitives for implementing light weight cooperative
* concurrency in Ruby. Basically they are a means of creating code blocks
* that can be paused and resumed, much like threads. The main difference
* is that they are never preempted and that the scheduling must be done by
* the programmer and not the VM.
*
* As opposed to other stackless light weight concurrency models, each fiber
* comes with a small 4KB stack. This enables the fiber to be paused from deeply
* nested function calls within the fiber block.
*
* When a fiber is created it will not run automatically. Rather it must
* be explicitly asked to run using the <code>Fiber#resume</code> method.
* The code running inside the fiber can give up control by calling
* <code>Fiber.yield</code>, in which case it yields control back to the
* caller (the caller of <code>Fiber#resume</code>).
*
* Upon yielding or termination the Fiber returns the value of the last
* executed expression.
*
* For instance:
*
* fiber = Fiber.new do
* Fiber.yield 1
* 2
* end
*
* puts fiber.resume
* puts fiber.resume
* puts fiber.resume
*
* <em>produces</em>
*
* 1
* 2
* FiberError: dead fiber called
*
* The <code>Fiber#resume</code> method accepts an arbitrary number of
* parameters. If it is the first call to <code>resume</code>, then they
* will be passed as block arguments. Otherwise they will be the return
* value of the call to <code>Fiber.yield</code>.
*
* Example:
*
* fiber = Fiber.new do |first|
* second = Fiber.yield first + 2
* end
*
* puts fiber.resume 10
* puts fiber.resume 14
* puts fiber.resume 18
*
* <em>produces</em>
*
* 12
* 14
* FiberError: dead fiber called
*
*/
static const rb_data_type_t fiber_data_type = {
"fiber",
{fiber_mark, fiber_free, fiber_memsize,},
};
static VALUE
fiber_alloc(VALUE klass)
{
return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
rb_fiber_t *fib;
rb_thread_t *th = GET_THREAD();
if (DATA_PTR(fibval) != 0) {
rb_raise(rb_eRuntimeError, "cannot initialize twice");
}
THREAD_MUST_BE_RUNNING(th);
fib = ALLOC(rb_fiber_t);
memset(fib, 0, sizeof(rb_fiber_t));
fib->cont.self = fibval;
fib->cont.type = FIBER_CONTEXT;
cont_init(&fib->cont, th);
fib->prev = Qnil;
fib->status = CREATED;
DATA_PTR(fibval) = fib;
return fib;
}
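/* Initialize a newly allocated fiber: set up a pristine VM stack with an
* initial dummy control frame and link the fiber into the fiber list. */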
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
rb_fiber_t *fib = fiber_t_alloc(fibval);
rb_context_t *cont = &fib->cont;
rb_thread_t *th = &cont->saved_thread;
/* initialize cont */
cont->vm_stack = 0;
th->stack = 0;
th->stack_size = 0;
fiber_link_join(fib);
th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
th->stack = ALLOC_N(VALUE, th->stack_size);
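/* Set up an initial dummy control frame at the top of the fresh VM
 * stack; its fields are zero-filled below and replaced with real
 * frames when rb_fiber_start() invokes the fiber's block. */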
th->cfp = (void *)(th->stack + th->stack_size);
th->cfp--;
th->cfp->pc = 0;
th->cfp->sp = th->stack + 1;
#if VM_DEBUG_BP_CHECK
th->cfp->bp_check = 0;
#endif
th->cfp->ep = th->stack;
*th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
th->cfp->self = Qnil;
th->cfp->klass = Qnil;
th->cfp->flag = 0;
th->cfp->iseq = 0;
th->cfp->proc = 0;
th->cfp->block_iseq = 0;
th->cfp->me = 0;
th->tag = 0;
th->local_storage = st_init_numtable();
th->first_proc = proc;
#if !FIBER_USE_NATIVE
MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif
return fibval;
}
/* :nodoc: */
static VALUE
rb_fiber_init(VALUE fibval)
{
return fiber_init(fibval, rb_block_proc());
}
VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}
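/* Choose the fiber to switch back to when the current fiber yields or
 * terminates: the fiber that resumed it, if any, otherwise the root
 * fiber. Yielding from the root fiber itself is an error. */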
static VALUE
return_fiber(void)
{
rb_fiber_t *fib;
VALUE curr = rb_fiber_current();
VALUE prev;
GetFiberPtr(curr, fib);
prev = fib->prev;
if (NIL_P(prev)) {
const VALUE root_fiber = GET_THREAD()->root_fiber;
if (root_fiber == curr) {
rb_raise(rb_eFiberError, "can't yield from root fiber");
}
return root_fiber;
}
else {
fib->prev = Qnil;
return prev;
}
}
VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
VALUE value = fib->cont.value;
fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
/* Ruby must not switch to another thread until terminated_machine_stack has been stored */
terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
fib->context.uc_stack.ss_sp = NULL;
fib->cont.machine_stack = NULL;
fib->cont.machine_stack_size = 0;
#endif
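/* Switch away for good: a terminated fiber is never resumed again, so
 * this call does not return. */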
rb_fiber_transfer(return_fiber(), 1, &value);
}
void
rb_fiber_start(void)
{
rb_thread_t *th = GET_THREAD();
rb_fiber_t *fib;
rb_context_t *cont;
rb_proc_t *proc;
int state;
GetFiberPtr(th->fiber, fib);
cont = &fib->cont;
TH_PUSH_TAG(th);
if ((state = EXEC_TAG()) == 0) {
int argc;
const VALUE *argv, args = cont->value;
GetProcPtr(cont->saved_thread.first_proc, proc);
argv = (argc = cont->argc) > 1 ? RARRAY_RAWPTR(args) : &args;
cont->value = Qnil;
th->errinfo = Qnil;
th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
th->root_svar = Qnil;
fib->status = RUNNING;
cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
}
TH_POP_TAG();
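/* An exception escaping the fiber's block cannot be raised from here;
 * enqueue it as a pending interrupt so that the context which resumed
 * this fiber re-raises it. */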
if (state) {
if (state == TAG_RAISE || state == TAG_FATAL) {
rb_threadptr_pending_interrupt_enque(th, th->errinfo);
}
else {
VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
if (!NIL_P(err))
rb_threadptr_pending_interrupt_enque(th, err);
}
RUBY_VM_SET_INTERRUPT(th);
}
rb_fiber_terminate(fib);
rb_bug("rb_fiber_start: unreachable");
}
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
rb_fiber_t *fib;
/* no need to allocate vm stack */
fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
fib->status = RUNNING;
fib->prev_fiber = fib->next_fiber = fib;
return fib;
}
VALUE
rb_fiber_current(void)
{
rb_thread_t *th = GET_THREAD();
if (th->fiber == 0) {
/* save root */
rb_fiber_t *fib = root_fiber_alloc(th);
th->root_fiber = th->fiber = fib->cont.self;
}
return th->fiber;
}
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
rb_thread_t *th = GET_THREAD();
rb_fiber_t *fib;
if (th->fiber) {
GetFiberPtr(th->fiber, fib);
cont_save_thread(&fib->cont, th);
}
else {
/* create current fiber */
fib = root_fiber_alloc(th);
th->root_fiber = th->fiber = fib->cont.self;
}
#if !FIBER_USE_NATIVE
cont_save_machine_stack(th, &fib->cont);
#endif
if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
#if FIBER_USE_NATIVE
fiber_setcontext(next_fib, fib);
#ifndef _WIN32
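/* A fiber that finished in rb_fiber_terminate() parked its machine
 * stack in terminated_machine_stack (it could not unmap the stack it
 * was still running on). Recycle that stack into the cache, or unmap
 * it when the cache is full. */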
if (terminated_machine_stack.ptr) {
if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
machine_stack_cache_index++;
}
else {
if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
}
else {
rb_bug("terminated fiber resumed");
}
}
terminated_machine_stack.ptr = NULL;
terminated_machine_stack.size = 0;
}
#endif
#endif
/* restored */
GetFiberPtr(th->fiber, fib);
if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
return fib->cont.value;
}
#if !FIBER_USE_NATIVE
else {
return Qundef;
}
#endif
}
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
VALUE value;
rb_fiber_t *fib;
rb_context_t *cont;
rb_thread_t *th = GET_THREAD();
GetFiberPtr(fibval, fib);
cont = &fib->cont;
if (th->fiber == fibval) {
/* ignore the fiber context switch
* because the destination fiber is the same as the current fiber
*/
return make_passing_arg(argc, argv);
}
if (cont->saved_thread.self != th->self) {
rb_raise(rb_eFiberError, "fiber called across threads");
}
else if (cont->saved_thread.protect_tag != th->protect_tag) {
rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
}
else if (fib->status == TERMINATED) {
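/* Switching to a dead fiber: build a FiberError and deliver it to the
 * fiber being fallen back to (the previous or root fiber); argc == -1
 * marks cont->value as an exception to raise on arrival (see
 * fiber_store()). */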
value = rb_exc_new2(rb_eFiberError, "dead fiber called");
if (th->fiber != fibval) {
GetFiberPtr(th->fiber, fib);
if (fib->status != TERMINATED) rb_exc_raise(value);
fibval = th->root_fiber;
}
else {
fibval = fib->prev;
if (NIL_P(fibval)) fibval = th->root_fiber;
}
GetFiberPtr(fibval, fib);
cont = &fib->cont;
cont->argc = -1;
cont->value = value;
#if FIBER_USE_NATIVE
{
VALUE oldfibval;
rb_fiber_t *oldfib;
oldfibval = rb_fiber_current();
GetFiberPtr(oldfibval, oldfib);
fiber_setcontext(fib, oldfib);
}
#else
cont_restore_0(cont, &value);
#endif
}
if (is_resume) {
fib->prev = rb_fiber_current();
}
else {
/* restore `tracing' context. see [Feature #4347] */
th->trace_arg = cont->saved_thread.trace_arg;
}
cont->argc = argc;
cont->value = make_passing_arg(argc, argv);
value = fiber_store(fib);
#if !FIBER_USE_NATIVE
if (value == Qundef) {
cont_restore_0(cont, &value);
rb_bug("rb_fiber_resume: unreachable");
}
#endif
RUBY_VM_CHECK_INTS(th);
return value;
}
VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
return fiber_switch(fib, argc, argv, 0);
}
VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
rb_fiber_t *fib;
GetFiberPtr(fibval, fib);
if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
rb_raise(rb_eFiberError, "double resume");
}
if (fib->transfered != 0) {
rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
}
return fiber_switch(fibval, argc, argv, 1);
}
VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
return rb_fiber_transfer(return_fiber(), argc, argv);
}
void
rb_fiber_reset_root_local_storage(VALUE thval)
{
rb_thread_t *th;
rb_fiber_t *fib;
GetThreadPtr(thval, th);
if (th->root_fiber && th->root_fiber != th->fiber) {
GetFiberPtr(th->root_fiber, fib);
th->local_storage = fib->cont.saved_thread.local_storage;
}
}
/*
* call-seq:
* fiber.alive? -> true or false
*
* Returns true if the fiber can still be resumed (or transferred
* to). After finishing execution of the fiber block this method will
* always return false. You need to <code>require 'fiber'</code>
* before using this method.
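*
* For example (an illustrative sketch):
*
*     require 'fiber'
*
*     fiber = Fiber.new { Fiber.yield }
*     fiber.alive? #=> true
*     fiber.resume          # runs up to Fiber.yield
*     fiber.resume          # finishes the block
*     fiber.alive? #=> false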
*/
VALUE
rb_fiber_alive_p(VALUE fibval)
{
rb_fiber_t *fib;
GetFiberPtr(fibval, fib);
return fib->status != TERMINATED ? Qtrue : Qfalse;
}
/*
* call-seq:
* fiber.resume(args, ...) -> obj
*
* Resumes the fiber from the point at which the last <code>Fiber.yield</code>
* was called, or starts running it if it is the first call to
* <code>resume</code>. Arguments passed to resume will be the value of
* the <code>Fiber.yield</code> expression or will be passed as block
* parameters to the fiber's block if this is the first <code>resume</code>.
*
* The <code>resume</code> call itself evaluates to the arguments passed
* to the next <code>Fiber.yield</code> statement inside the fiber's
* block, or to the block's value if it runs to completion without any
* <code>Fiber.yield</code>.
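*
* For example (an illustrative sketch):
*
*     fiber = Fiber.new do |x|
*       y = Fiber.yield x * 2
*       y + 1
*     end
*
*     fiber.resume(3)  #=> 6  (3 is passed as the block parameter x)
*     fiber.resume(10) #=> 11 (10 becomes the value of Fiber.yield)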
*/
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
return rb_fiber_resume(fib, argc, argv);
}
/*
* call-seq:
* fiber.transfer(args, ...) -> obj
*
* Transfer control to another fiber, resuming it from where it last
* stopped or starting it if it was not resumed before. The calling
* fiber will be suspended much like in a call to
* <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
* before using this method.
*
* The fiber which receives the transfer call treats it much like a
* resume call. Arguments passed to transfer are treated like those
* passed to resume.
*
* You cannot resume a fiber that transferred control to another one.
* This will cause a double resume error. You need to transfer control
* back to this fiber before it can yield and be resumed.
*
* Example:
*
* fiber1 = Fiber.new do
* puts "In Fiber 1"
* Fiber.yield
* end
*
* fiber2 = Fiber.new do
* puts "In Fiber 2"
* fiber1.transfer
* puts "Never see this message"
* end
*
* fiber3 = Fiber.new do
* puts "In Fiber 3"
* end
*
* fiber2.resume
* fiber3.resume
*
* <em>produces</em>
*
* In Fiber 2
* In Fiber 1
* In Fiber 3
*
*/
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
{
rb_fiber_t *fib;
GetFiberPtr(fibval, fib);
fib->transfered = 1;
return rb_fiber_transfer(fibval, argc, argv);
}
/*
* call-seq:
* Fiber.yield(args, ...) -> obj
*
* Yields control back to the context that resumed the fiber, passing
* along any arguments that were passed to it. The fiber will resume
* processing at this point when <code>resume</code> is called next.
* Any arguments passed to the next <code>resume</code> will be the
* value that this <code>Fiber.yield</code> expression evaluates to.
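*
* For example (an illustrative sketch):
*
*     fiber = Fiber.new do |a|
*       b = Fiber.yield a   # a is returned by the first resume
*       b                   # returned by the second resume
*     end
*
*     fiber.resume(:first)  #=> :first
*     fiber.resume(:second) #=> :second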
*/
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
return rb_fiber_yield(argc, argv);
}
/*
* call-seq:
* Fiber.current() -> fiber
*
* Returns the current fiber. You need to <code>require 'fiber'</code>
* before using this method. If you are not running in the context of
* a fiber this method will return the root fiber.
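*
* For example (an illustrative sketch):
*
*     require 'fiber'
*
*     fiber = Fiber.new { Fiber.yield Fiber.current }
*     fiber.resume == fiber #=> true
*     Fiber.current         #=> the root fiber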
*/
static VALUE
rb_fiber_s_current(VALUE klass)
{
return rb_fiber_current();
}
/*
* Document-class: FiberError
*
* Raised when an invalid operation is attempted on a Fiber, in
* particular when attempting to call/resume a dead fiber,
* attempting to yield from the root fiber, or calling a fiber across
* threads.
*
* fiber = Fiber.new{}
* fiber.resume #=> nil
* fiber.resume #=> FiberError: dead fiber called
*/
void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
rb_thread_t *th = GET_THREAD();
#ifdef _WIN32
SYSTEM_INFO info;
GetSystemInfo(&info);
pagesize = info.dwPageSize;
#else /* not WIN32 */
pagesize = sysconf(_SC_PAGESIZE);
#endif
SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif
rb_cFiber = rb_define_class("Fiber", rb_cObject);
rb_define_alloc_func(rb_cFiber, fiber_alloc);
rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
RUBY_SYMBOL_EXPORT_BEGIN
void
ruby_Init_Continuation_body(void)
{
rb_cContinuation = rb_define_class("Continuation", rb_cObject);
rb_undef_alloc_func(rb_cContinuation);
rb_undef_method(CLASS_OF(rb_cContinuation), "new");
rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
rb_define_global_function("callcc", rb_callcc, 0);
}
void
ruby_Init_Fiber_as_Coroutine(void)
{
rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
RUBY_SYMBOL_EXPORT_END