/* -*-c-*- */
/**********************************************************************

  thread_win32.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include <process.h>

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */

#undef Sleep

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
static int native_mutex_lock(rb_nativethread_lock_t *lock);
static int native_mutex_unlock(rb_nativethread_lock_t *lock);
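
/* Fetch GetLastError(), format it as readable text (falling back to the
 * system default language), and abort via rb_bug(). Used by the wrappers
 * below whenever a Win32 primitive fails unexpectedly. */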
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) & lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
}
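
/* w32_mutex_lock() and w32_mutex_create() implement the Win32 mutex that
 * backs the GVL (vm->gvl.lock): gvl_acquire()/gvl_release() simply lock
 * and unlock it, and gvl_yield() releases it, calls Sleep(0) so another
 * ready thread can run, then re-acquires it. */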
static int
w32_mutex_lock(HANDLE lock)
{
    DWORD result;
    while (1) {
        thread_debug("native_mutex_lock: %p\n", lock);
        result = w32_wait_events(&lock, 1, INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            thread_debug("acquire mutex: %p\n", lock);
            return 0;
          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            thread_debug("acquire mutex interrupted: %p\n", lock);
            return 0;
          case WAIT_TIMEOUT:
            thread_debug("timeout mutex: %p\n", lock);
            break;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;
          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}

static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("native_mutex_initialize");
    }
    return lock;
}

#define GVL_DEBUG 0

static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    w32_mutex_lock(vm->gvl.lock);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

static void
gvl_release(rb_vm_t *vm)
{
    ReleaseMutex(vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    gvl_release(th->vm);
    native_thread_yield();
    gvl_acquire(vm, th);
}

static void
gvl_atfork(rb_vm_t *vm)
{
    rb_bug("gvl_atfork() is called on win32");
}

static void
gvl_init(rb_vm_t *vm)
{
    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
    vm->gvl.lock = w32_mutex_create();
}

static void
gvl_destroy(rb_vm_t *vm)
{
    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
    CloseHandle(vm->gvl.lock);
}

static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return TlsSetValue(ruby_native_thread_key, th);
}

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    ruby_native_thread_key = TlsAlloc();
    ruby_thread_set_native(th);
    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
                 th, GET_THREAD()->thread_id,
                 th->native_thread_data.interrupt_event);
}

static void
w32_set_event(HANDLE handle)
{
    if (SetEvent(handle) == 0) {
        w32_error("w32_set_event");
    }
}

static void
w32_reset_event(HANDLE handle)
{
    if (ResetEvent(handle) == 0) {
        w32_error("w32_reset_event");
    }
}
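
/* Core wait primitive. When a Ruby thread (th != 0) is given, its
 * interrupt_event is appended to the handle array, so any blocking wait
 * can be woken early by ubf_handle(); that case is reported as EINTR.
 * The GVL is held only while the handle list is being prepared. */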
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    DWORD ret;

    thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
        gvl_acquire(th->vm, th);
        if (intr == th->native_thread_data.interrupt_event) {
            w32_reset_event(intr);
            if (RUBY_VM_INTERRUPTED(th)) {
                w32_set_event(intr);
            }

            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
        }
        gvl_release(th->vm);
    }

    thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug(" WaitForMultipleObjects end (ret: %lu)\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + count - 1) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && THREAD_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            thread_debug(" * error handle %d - %s\n", i,
                         GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}

static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;

    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}

#ifdef _MSC_VER
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif

static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
}

int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;

    BLOCKING_REGION(ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
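
/* Sleep with interruption support: register ubf_handle() as the unblock
 * function (guarded by interrupt_lock), wait on the thread's interrupt
 * event for up to msec via w32_wait_events(), then deregister. The GVL is
 * released for the duration of the sleep. */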
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    const volatile DWORD msec = (tv) ?
        (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000) : INFINITE;

    GVL_UNLOCK_BEGIN();
    {
        DWORD ret;

        native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted. return immediate */
        }
        else {
            thread_debug("native_sleep start (%lu)\n", msec);
            ret = w32_wait_events(0, 0, msec, th);
            thread_debug("native_sleep done (%lu)\n", ret);
        }

        native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        native_mutex_unlock(&th->interrupt_lock);
    }
    GVL_UNLOCK_END();
}

static int
native_mutex_lock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex);
#else
    EnterCriticalSection(&lock->crit);
#endif
    return 0;
}

static int
native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    thread_debug("release mutex: %p\n", lock->mutex);
    return ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
    return 0;
#endif
}

static int
native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    int result;
    thread_debug("native_mutex_trylock: %p\n", lock->mutex);
    result = w32_wait_events(&lock->mutex, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", result);
    switch (result) {
      case WAIT_OBJECT_0:
        return 0;
      case WAIT_TIMEOUT:
        return EBUSY;
    }
    return EINVAL;
#else
    return TryEnterCriticalSection(&lock->crit) == 0;
#endif
}

static void
native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
    /* thread_debug("initialize mutex: %p\n", lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

static void
native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}
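
/* Condition variables are emulated with a circular doubly-linked list of
 * waiters rooted at the rb_nativethread_cond_t itself; each waiter owns an
 * auto-reset event. signal/broadcast unlink one or all entries and
 * SetEvent() them; timedwait enqueues an entry, drops the mutex, waits on
 * its event, then re-locks the mutex and unlinks itself. */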
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};

static void
native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}

static void
native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}

static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}

static int
native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    return native_cond_timedwait_ms(cond, mutex, INFINITE);
}

static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}
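
/* Convert a relative timeout into an absolute timespec based on
 * gettimeofday(), normalizing tv_nsec and clamping tv_sec to TIMET_MAX on
 * overflow; abs_timespec_to_timeout_ms() above performs the reverse
 * conversion when the wait actually happens. */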
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}

static void
native_cond_initialize(rb_nativethread_cond_t *cond, int flags)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}

static void
native_cond_destroy(rb_nativethread_cond_t *cond)
{
    /* */
}

void
ruby_init_stack(volatile VALUE *addr)
{
}

#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
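
/* Determine the current thread's stack region by VirtualQuery()ing a
 * local variable, then record the stack start address and usable size on
 * the rb_thread_t, keeping 1/5 of the region (at most 1MB) in reserve. */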
static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->machine.stack_start = (VALUE *)end - 1;
    th->machine.stack_maxsize = size - space;
}

#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
static void
native_thread_destroy(rb_thread_t *th)
{
    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
    w32_close_handle(intr);
}
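
/* Thread bootstrap: native_thread_create() starts the thread suspended
 * with a 4KB initial stack commit and resumes it once th->thread_id has
 * been recorded; thread_start_func_1() then sets up the machine stack and
 * the interrupt event before entering thread_start_func_2(). */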
static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->thread_id;

    native_thread_init_stack(th);
    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
                 th->thread_id, th->native_thread_data.interrupt_event);

    thread_start_func_2(th, th->machine.stack_start, rb_ia64_bsp());

    w32_close_handle(thread_id);
    thread_debug("thread deleted (th: %p)\n", th);
    return 0;
}

static int
native_thread_create(rb_thread_t *th)
{
    size_t stack_size = 4 * 1024; /* 4KB is the minimum commit size */
    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    if ((th->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->thread_id);

    if (THREAD_DEBUG) {
        Sleep(0);
        thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIdSIZE"\n",
                     th, th->thread_id,
                     th->native_thread_data.interrupt_event, stack_size);
    }
    return 0;
}

static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
}

/* @internal */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}

static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_handle: %p\n", th);

    w32_set_event(th->native_thread_data.interrupt_event);
}
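
/* The timer thread wakes up every TIME_QUANTUM_USEC/1000 milliseconds to
 * run timer_thread_function(). timer_thread.lock is an event that stays
 * unsignaled while the VM is working; native_stop_timer_thread() signals
 * it so that WaitForSingleObject() returns and the loop exits. */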
static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    thread_debug("timer_thread\n");
    while (WaitForSingleObject(timer_thread.lock, TIME_QUANTUM_USEC/1000) ==
           WAIT_TIMEOUT) {
        timer_thread_function(dummy);
    }
    thread_debug("timer killed\n");
    return 0;
}

void
rb_thread_wakeup_timer_thread(void)
{
    /* do nothing */
}

static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}

static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}

int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return rb_thread_raised_p(th, RAISED_STACKOVERFLOW);
}

#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_thread_raised_set(GET_THREAD(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_thread_t *th = GET_THREAD();
        if (!rb_thread_raised_p(th, RAISED_STACKOVERFLOW)) {
            rb_thread_raised_set(th, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif

int
rb_reserved_fd_p(int fd)
{
    return 0;
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}

static void
native_set_thread_name(rb_thread_t *th)
{
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */