/* -*-c-*- */
/**********************************************************************

  thread_pthread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/gc.h"
#include "rjit.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if defined(HAVE_FCNTL_H)
#include <fcntl.h>
#elif defined(HAVE_SYS_FCNTL_H)
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#if defined(__HAIKU__)
#include <kernel/OS.h>
#endif
#ifdef __linux__
#include <sys/syscall.h> /* for SYS_gettid */
#endif
#include <time.h>
#include <signal.h>

#if defined __APPLE__
# include <AvailabilityMacros.h>
#endif

#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
#else
# define USE_EVENTFD (0)
#endif

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
#else
static const void *const condattr_monotonic = NULL;
#endif
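
/* condattr_monotonic (above) is non-NULL only when condition variables can
 * be created to wait on CLOCK_MONOTONIC; when it is NULL,
 * native_cond_timeout() below falls back to the wall clock. */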

// native thread wrappers

#define NATIVE_MUTEX_LOCK_DEBUG 0
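/* Set NATIVE_MUTEX_LOCK_DEBUG (above) to 1 to have every native mutex
 * operation traced on stdout via mutex_debug(), serialized by its
 * internal dbglock. */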

static void
mutex_debug(const char *msg, void *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

void
rb_native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

void
rb_native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

void
rb_native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

void
rb_native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_init(cond, condattr_monotonic);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_destroy(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}

/*
 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast may
 * return EAGAIN after retrying 8192 times internally, as can be seen in:
 *
 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
 *
 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
 * therefore need to retry until the pthread functions stop returning EAGAIN.
 */

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
{
    int r;
    struct timespec ts;

    /*
     * Old Linux kernels may return EINTR, even though POSIX says
     * "These functions shall not return an error code of [EINTR]".
     * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
     * Let's hide it from arch-generic code.
     */
    do {
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}

static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);
    }
    else {
        struct timespec ts;

        rb_timespec_now(&ts);
        return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
    }
}

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
{
    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);
}
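
/*
 * Illustrative sketch: a caller waiting up to 100ms at a time on a native
 * condvar.  The lock/cond/predicate names here are hypothetical, not part
 * of this file:
 *
 *     rb_native_mutex_lock(&lock);
 *     while (!predicate)
 *         rb_native_cond_timedwait(&cond, &lock, 100);
 *     rb_native_mutex_unlock(&lock);
 */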

// thread scheduling

static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
static void rb_thread_execute_hooks(rb_event_flag_t event);
#define RB_INTERNAL_THREAD_HOOK(event) if (rb_internal_thread_event_hooks) { rb_thread_execute_hooks(event); }

static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__)
#  define USE_UBF_LIST 1
#endif
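
/* current_fork_gen (above) is bumped in thread_sched_atfork() further down,
 * so fork-generation comparisons such as TIMER_THREAD_CREATED_P() can
 * detect state inherited from the parent process. */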

/*
 * UBF_TIMER and ubf_list both use SIGVTALRM.
 *
 * UBF_TIMER has NOTHING to do with thread timeslices (TIMER_INTERRUPT_MASK)
 *
 * UBF_TIMER is to close the TOCTTOU signal race on programs where we
 * cannot rely on GVL contention (vm->gvl.timer) to perform wakeups
 * while a thread is doing blocking I/O on sockets or pipes.  With
 * rb_thread_call_without_gvl and similar functions:
 *
 * (1) Check interrupts.
 * (2) release GVL.
 * (2a) signal received
 * (3) call func with data1 (blocks for a long time without ubf_timer)
 * (4) acquire GVL.
 *     Other Ruby threads cannot run in parallel any more.
 * (5) Check interrupts.
 *
 * We need UBF_TIMER to break out of (3) if (2a) happens.
 *
 * ubf_list wakeups may be triggered on gvl_yield.
 *
 * If we have vm->gvl.timer (on GVL contention), we don't need UBF_TIMER
 * as it can perform the same tasks while doing timeslices.
 */
#define UBF_TIMER_NONE 0
#define UBF_TIMER_POSIX 1
#define UBF_TIMER_PTHREAD 2

#ifndef UBF_TIMER
# if defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_CREATE) && \
     defined(CLOCK_MONOTONIC) && defined(USE_UBF_LIST)
/* preferred */
#   define UBF_TIMER UBF_TIMER_POSIX
# elif defined(USE_UBF_LIST)
/* safe, but inefficient */
#   define UBF_TIMER UBF_TIMER_PTHREAD
# else
/* we'll be racy without SIGVTALRM for ubf_list */
#   define UBF_TIMER UBF_TIMER_NONE
# endif
#endif
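
/*
 * Illustrative sketch of the (1)-(5) sequence above at a typical blocking
 * region call site.  rb_thread_call_without_gvl is the public C-API entry
 * point that performs the sequence; the helper names and struct here are
 * hypothetical:
 *
 *     static void *blocking_read(void *arg)   // step (3): may block
 *     {
 *         struct my_io *io = arg;
 *         io->ret = read(io->fd, io->buf, io->len);
 *         return NULL;
 *     }
 *
 *     static void wake_read(void *arg)        // ubf: breaks out of (3)
 *     {
 *         struct my_io *io = arg;
 *         close(io->fd);  // or write to a self-pipe / eventfd
 *     }
 *
 *     rb_thread_call_without_gvl(blocking_read, &io, wake_read, &io);
 */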

enum rtimer_state {
    /* alive, after timer_create: */
    RTIMER_DISARM,
    RTIMER_ARMING,
    RTIMER_ARMED,

    RTIMER_DEAD
};

#if UBF_TIMER == UBF_TIMER_POSIX
static const struct itimerspec zero;
static struct {
    rb_atomic_t state_; /* rtimer_state */
    rb_serial_t fork_gen;
    timer_t timerid;
} timer_posix = {
    /* .state = */ RTIMER_DEAD,
};

#define TIMER_STATE_DEBUG 0

static const char *
rtimer_state_name(enum rtimer_state state)
{
    switch (state) {
      case RTIMER_DISARM: return "disarm";
      case RTIMER_ARMING: return "arming";
      case RTIMER_ARMED: return "armed";
      case RTIMER_DEAD: return "dead";
      default: rb_bug("unreachable");
    }
}

static enum rtimer_state
timer_state_exchange(enum rtimer_state state)
{
    enum rtimer_state prev = ATOMIC_EXCHANGE(timer_posix.state_, state);
    if (TIMER_STATE_DEBUG) fprintf(stderr, "state (exc): %s->%s\n", rtimer_state_name(prev), rtimer_state_name(state));
    return prev;
}

static enum rtimer_state
timer_state_cas(enum rtimer_state expected_prev, enum rtimer_state state)
{
    enum rtimer_state prev = ATOMIC_CAS(timer_posix.state_, expected_prev, state);

    if (TIMER_STATE_DEBUG) {
        if (prev == expected_prev) {
            fprintf(stderr, "state (cas): %s->%s\n", rtimer_state_name(prev), rtimer_state_name(state));
        }
        else {
            fprintf(stderr, "state (cas): %s (expected:%s)\n", rtimer_state_name(prev), rtimer_state_name(expected_prev));
        }
    }

    return prev;
}

#elif UBF_TIMER == UBF_TIMER_PTHREAD
static void *timer_pthread_fn(void *);
static struct {
    int low[2];
    rb_atomic_t armed; /* boolean */
    rb_serial_t fork_gen;
    pthread_t thid;
} timer_pthread = {
    { -1, -1 },
};
#endif

static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
                                          const rb_hrtime_t *,
                                          int *drained_p);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);

#define TIMER_THREAD_CREATED_P() (signal_self_pipe.fork_gen == current_fork_gen)

/* for testing, and in case we come across a platform w/o pipes: */
#define BUSY_WAIT_SIGNALS (0)

/*
 * sigwait_th is the thread which owns sigwait_fd and sleeps on it
 * (using ppoll). The RJIT worker can run with sigwait_th==0, so we
 * initialize it to THREAD_INVALID at startup and fork time. It is the
 * ONLY thread allowed to read from sigwait_fd, otherwise starvation
 * can occur.
 */
#define THREAD_INVALID ((const rb_thread_t *)-1)
static const rb_thread_t *sigwait_th;

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

/* 100ms.  10ms is too small for user-level thread scheduling
 * on recent Linux (tested on 2.6.35)
 */
#define TIME_QUANTUM_MSEC (100)
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)

/*
 * Designate the next sched.timer thread; favor the last thread in
 * the readyq since it will have been in the readyq longest.
 */
static int
designate_timer_thread(struct rb_thread_sched *sched)
{
    rb_thread_t *last;

    last = ccan_list_tail(&sched->readyq, rb_thread_t, sched.node.readyq);

    if (last) {
        rb_native_cond_signal(&last->nt->cond.readyq);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

/*
 * We become the designated timer thread to kick vm->gvl.owner
 * periodically.  Continue on the old timeout if it expired.
 */
static void
do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
{
    rb_vm_t *vm = GET_VM();
    static rb_hrtime_t abs;

    sched->timer = th;

    /* take over wakeups from UBF_TIMER */
    ubf_timer_disarm();

    if (sched->timer_err == ETIMEDOUT) {
        abs = native_cond_timeout(&th->nt->cond.readyq, TIME_QUANTUM_NSEC);
    }
    sched->timer_err = native_cond_timedwait(&th->nt->cond.readyq, &sched->lock, &abs);

    ubf_wakeup_all_threads();

    if (UNLIKELY(rb_signal_buff_size())) {
        if (th == vm->ractor.main_thread) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        }
        else {
            threadptr_trap_interrupt(vm->ractor.main_thread);
        }
    }

    /*
     * Timeslice.  Warning: the process may fork while this
     * thread is contending for GVL:
     */
    const rb_thread_t *running;
    if ((running = sched->running) != 0) {
        // strictly speaking, accessing "running" is not thread-safe
        RUBY_VM_SET_TIMER_INTERRUPT(running->ec);
    }
    sched->timer = 0;
}

static void
thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ccan_list_add_tail(&sched->readyq, &th->sched.node.readyq);
}

static void
thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY);
    if (sched->running) {
        VM_ASSERT(th->unblock.func == 0 &&
                  "we must not be in ubf_list and GVL readyq at the same time");

        // waiting -> ready
        thread_sched_to_ready_common(sched, th);

        // wait for running chance
        do {
            if (!sched->timer) {
                do_gvl_timer(sched, th);
            }
            else {
                rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock);
            }
        } while (sched->running);

        ccan_list_del_init(&th->sched.node.readyq);

        if (sched->need_yield) {
            sched->need_yield = 0;
            rb_native_cond_signal(&sched->switch_cond);
        }
    }
    else { /* reset timer if uncontended */
        sched->timer_err = ETIMEDOUT;
    }

    // ready -> running
    sched->running = th;

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED);

    if (!sched->timer) {
        if (!designate_timer_thread(sched) && !ubf_threads_empty()) {
            rb_thread_wakeup_timer_thread(-1);
        }
    }
}

static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    rb_native_mutex_lock(&sched->lock);
    thread_sched_to_running_common(sched, th);
    rb_native_mutex_unlock(&sched->lock);
}

static rb_thread_t *
thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    rb_thread_t *next;
    sched->running = NULL;
    next = ccan_list_top(&sched->readyq, rb_thread_t, sched.node.readyq);
    if (next) rb_native_cond_signal(&next->nt->cond.readyq);

    return next;
}

static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);
    rb_native_mutex_lock(&sched->lock);
    thread_sched_to_waiting_common(sched, th);
    rb_native_mutex_unlock(&sched->lock);
}

static void
thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED);
    thread_sched_to_waiting(sched, th);
}

static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    rb_thread_t *next;

    /*
     * Perhaps other threads are stuck in a blocking region w/o the GVL,
     * too (perhaps looping in io_close_fptr), so we kick them:
     */
    ubf_wakeup_all_threads();
    rb_native_mutex_lock(&sched->lock);
    next = thread_sched_to_waiting_common(sched, th);

    /* Another thread is already processing a GVL yield. */
    if (UNLIKELY(sched->wait_yield)) {
        while (sched->wait_yield)
            rb_native_cond_wait(&sched->switch_wait_cond, &sched->lock);
    }
    else if (next) {
        /* Wait until another thread takes the GVL. */
        sched->need_yield = 1;
        sched->wait_yield = 1;
        while (sched->need_yield)
            rb_native_cond_wait(&sched->switch_cond, &sched->lock);
        sched->wait_yield = 0;
        rb_native_cond_broadcast(&sched->switch_wait_cond);
    }
    else {
        rb_native_mutex_unlock(&sched->lock);
        native_thread_yield();
        rb_native_mutex_lock(&sched->lock);
        rb_native_cond_broadcast(&sched->switch_wait_cond);
    }
    thread_sched_to_running_common(sched, th);
    rb_native_mutex_unlock(&sched->lock);
}

void
rb_thread_sched_init(struct rb_thread_sched *sched)
{
    rb_native_mutex_initialize(&sched->lock);
    rb_native_cond_initialize(&sched->switch_cond);
    rb_native_cond_initialize(&sched->switch_wait_cond);
    ccan_list_head_init(&sched->readyq);
    sched->running = NULL;
    sched->timer = 0;
    sched->timer_err = ETIMEDOUT;
    sched->need_yield = 0;
    sched->wait_yield = 0;
}

#if 0
// TODO

static void clear_thread_cache_altstack(void);

static void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    /*
     * This is only called once at VM shutdown (not at fork); another
     * thread may still grab vm->gvl.lock when calling gvl_release at
     * the end of thread_start_func_2.
     */
    if (0) {
        rb_native_cond_destroy(&sched->switch_wait_cond);
        rb_native_cond_destroy(&sched->switch_cond);
        rb_native_mutex_destroy(&sched->lock);
    }
    clear_thread_cache_altstack();
}
#endif

#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);
static void
thread_sched_atfork(struct rb_thread_sched *sched)
{
    current_fork_gen++;
    thread_cache_reset();
    rb_thread_sched_init(sched);
    thread_sched_to_running(sched, GET_THREAD());
}
#endif

#ifdef RB_THREAD_LOCAL_SPECIFIER
static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
#else
static pthread_key_t ruby_native_thread_key;
#endif

static void
null_func(int i)
{
    /* null */
}

rb_thread_t *
ruby_thread_from_native(void)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
#else
    return pthread_getspecific(ruby_native_thread_key);
#endif
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th) {
#ifdef USE_UBF_LIST
        ccan_list_node_init(&th->sched.node.ubf);
#endif
    }

    // setup TLS

    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return 1;
#else
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
#endif
}

#ifdef RB_THREAD_T_HAS_NATIVE_ID
static int
get_native_thread_id(void)
{
#ifdef __linux__
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();
#endif
}
#endif

static void
native_thread_init(struct rb_native_thread *nt)
{
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();
#endif
    rb_native_cond_initialize(&nt->cond.readyq);
    if (&nt->cond.readyq != &nt->cond.intr)
        rb_native_cond_initialize(&nt->cond.intr);
}

void
Init_native_thread(rb_thread_t *main_th)
{
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
        if (r == 0) {
            r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
        }
        if (r) condattr_monotonic = NULL;
    }
#endif

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    }
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");
    }
#endif
    posix_signal(SIGVTALRM, null_func);

    // setup main thread
    main_th->nt->thread_id = pthread_self();
    ruby_thread_set_native(main_th);
    native_thread_init(main_th->nt);
}

#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 1
#endif

static void
native_thread_destroy(rb_thread_t *th)
{
    struct rb_native_thread *nt = th->nt;

    rb_native_cond_destroy(&nt->cond.readyq);

    if (&nt->cond.readyq != &nt->cond.intr)
        rb_native_cond_destroy(&nt->cond.intr);

    /*
     * prevent false positive from ruby_thread_has_gvl_p if that
     * gets called from an interposing function wrapper
     */
    if (USE_THREAD_CACHE)
        ruby_thread_set_native(0);
}

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void *);
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of the current thread's stack
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
# else
    guard = getpagesize();
# endif
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    /* Must not use thinfo.__pi_stacksize for size.
       It is around 3KB smaller than the correct size
       calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    thread_info info;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif

static struct {
    rb_nativethread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
    RUBY_STACK_SPACE_RATIO = 5
};

static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}
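
/*
 * For example: an 8 MiB thread stack yields 8 MiB / 5 ~= 1.6 MiB, which is
 * clamped to the 1024 KiB limit; a 1 MiB stack yields about 205 KiB.
 */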

#ifdef __linux__
static __attribute__((noinline)) void
reserve_stack(volatile char *limit, size_t size)
{
# ifdef C_ALLOCA
#   error needs alloca()
# endif
    struct rlimit rl;
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000}; /* for -fstack-check */

    STACK_GROW_DIR_DETECTION;

    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
        return;

    if (size < stack_check_margin) return;
    size -= stack_check_margin;

    size -= sizeof(buf); /* margin */
    if (IS_STACK_DIR_UPPER()) {
        const volatile char *end = buf + sizeof(buf);
        limit += size;
        if (limit > end) {
            /* |<-bottom (=limit(a))                                     top->|
             * | .. |<-buf 256B |<-end                          | stack check |
             * |  256B  |              =size=                   | margin (4KB)|
             * |              =size=         limit(b)->|  256B  |             |
             * |                |       alloca(sz)     |        |             |
             * | .. |<-buf      |<-limit(c)    [sz-1]->0>       |             |
             */
            size_t sz = limit - end;
            limit = alloca(sz);
            limit[sz-1] = 0;
        }
    }
    else {
        limit -= size;
        if (buf > limit) {
            /* |<-top (=limit(a))                                     bottom->|
             * | .. | 256B buf->|                               | stack check |
             * |  256B  |              =size=                   | margin (4KB)|
             * |              =size=         limit(b)->|  256B  |             |
             * |                |       alloca(sz)     |        |             |
             * | .. |      buf->|         limit(c)-><0>         |             |
             */
            size_t sz = buf - limit;
            limit = alloca(sz);
            limit[0] = 0;
        }
    }
}
#else
# define reserve_stack(limit, size) ((void)(limit), (void)(size))
#endif

#undef ruby_init_stack
void
ruby_init_stack(volatile VALUE *addr)
{
    native_main_thread.id = pthread_self();

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
    {
        void* stackaddr;
        size_t size;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            reserve_stack(stackaddr, size);
            goto bound_check;
        }
    }
#endif
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
    {
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space;
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        addr = native_main_thread.stack_start;
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }

#if MAINSTACKADDR_AVAILABLE
  bound_check:
#endif
    /* If addr is out of range of main-thread stack range estimation,  */
    /* it should be on co-routine (alternative stack). [Feature #2294] */
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            /* out of range */
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0; /* unknown */
        }
    }
}

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->ec->machine.stack_start = native_main_thread.stack_start;
        th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
            th->ec->machine.stack_start = (VALUE *)&curr;
            th->ec->machine.stack_maxsize = size - diff;
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }

    return 0;
}

#ifndef __CYGWIN__
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;

#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
    ruby_nt_serial = th->nt->serial;
#endif

    RB_ALTSTACK_INIT(void *altstack, th->nt->altstack);
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif

        native_thread_init(th->nt);

        RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED);

        /* run */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->ec->machine.stack_start);
#else
        thread_start_func_2(th, &stack_start);
#endif
    }
#if USE_THREAD_CACHE
    /* cache thread */
    if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {
        goto thread_start;
    }
#else
    RB_ALTSTACK_FREE(altstack);
#endif
    return 0;
}

struct cached_thread_entry {
    rb_nativethread_cond_t cond;
    rb_nativethread_id_t thread_id;
    rb_thread_t *th;
    void *altstack;
    struct ccan_list_node node;
};

#if USE_THREAD_CACHE
static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
static CCAN_LIST_HEAD(cached_thread_head);

# if defined(HAVE_WORKING_FORK)
static void
thread_cache_reset(void)
{
    rb_native_mutex_initialize(&thread_cache_lock);
    ccan_list_head_init(&cached_thread_head);
}
# endif

/*
 * number of seconds to cache for; I think 1-5s is sufficient to obviate
 * the need for a thread pool in many network programs (taking into account
 * worst-case network latency across the globe) without wasting memory
 */
#ifndef THREAD_CACHE_TIME
# define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
#endif
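
/* Because of the #ifndef guard above, the cache window can be overridden
 * at build time, e.g. (illustrative):
 *   -DTHREAD_CACHE_TIME='((rb_hrtime_t)5 * RB_HRTIME_PER_SEC)'
 */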

static rb_thread_t *
register_cached_thread_and_wait(void *altstack)
{
    rb_hrtime_t end = THREAD_CACHE_TIME;
    struct cached_thread_entry entry;

    rb_native_cond_initialize(&entry.cond);
    entry.altstack = altstack;
    entry.th = NULL;
    entry.thread_id = pthread_self();
    end = native_cond_timeout(&entry.cond, end);
* compile.c, dir.c, eval.c, eval_jump.h, eval_method.h, numeric.c,
pack.c, parse.y, re.c, thread.c, vm.c, vm_dump.c, call_cfunc.ci,
thread_pthread.ci, thread_win32.ci: fixed indentation.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12431 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-06-05 08:25:10 +04:00
|
|
|
|
mjit.c: merge MJIT infrastructure
that allows to JIT-compile Ruby methods by generating C code and
using C compiler. See the first comment of mjit.c to know what this
file does.
mjit.c is authored by Vladimir Makarov <vmakarov@redhat.com>.
After he invented great method JIT infrastructure for MRI as MJIT,
Lars Kanis <lars@greiz-reinsdorf.de> sent the patch to support MinGW
in MJIT. In addition to merging it, I ported pthread to Windows native
threads. Now this MJIT infrastructure can be compiled on Visual Studio.
This commit simplifies mjit.c to decrease code at initial merge. For
example, this commit does not provide multiple JIT threads support.
We can resurrect them later if we really want them, but I wanted to minimize
diff to make it easier to review this patch.
`/tmp/_mjitXXX` file is renamed to `/tmp/_ruby_mjitXXX` because non-Ruby
developers may not know the name "mjit" and the file name should make
sure it's from Ruby and not from some harmful programs. TODO: it may be
better to store this to some temporary directory which Ruby is already using
by Tempfile, if it's not bad for performance.
mjit.h: New. It has `mjit_exec` interface similar to `vm_exec`, which is
for triggering MJIT. This drops interface for AOT compared to the original
MJIT.
Makefile.in: define macros to let MJIT know the path of MJIT header.
Probably we can refactor this to reduce the number of macros (TODO).
win32/Makefile.sub: ditto.
common.mk: compile mjit.o and mjit_compile.o. Unlike original MJIT, this
commit separates MJIT infrastructure and JIT compiler code as independent
object files. As initial patch is NOT going to have ultra-fast JIT compiler,
it's likely to replace JIT compiler, e.g. original MJIT's compiler or some
future JIT impelementations which are not public now.
inits.c: define MJIT module. This is added because `MJIT.enabled?` was
necessary for testing.
test/lib/zombie_hunter.rb: skip if `MJIT.enabled?`. Obviously this
wouldn't work with current code when JIT is enabled.
test/ruby/test_io.rb: skip this too. This would make no sense with MJIT.
ruby.c: define MJIT CLI options. As major difference from original MJIT,
"-j:l"/"--jit:llvm" are renamed to "--jit-cc" because I want to support
not only gcc/clang but also cl.exe (Visual Studio) in the future. But it
takes only "--jit-cc=gcc", "--jit-cc=clang" for now. And only long "--jit"
options are allowed since some Ruby committers preferred it at Ruby
developers Meeting on January, and some of options are renamed.
This file also triggers to initialize MJIT thread and variables.
eval.c: finalize MJIT worker thread and variables.
test/ruby/test_rubyoptions.rb: fix number of CLI options for --jit.
thread_pthread.c: change for pthread abstraction in MJIT. Prefix rb_ for
functions which are used by other files.
thread_win32.c: ditto, for Windows. Those pthread porting is one of major
works that YARV-MJIT created, which is my fork of MJIT, in Feature 14235.
thread.c: follow rb_ prefix changes
vm.c: trigger MJIT call on VM invocation. Also trigger `mjit_mark` to avoid
SEGV by race between JIT and GC of ISeq. The improvement was provided by
wanabe <s.wanabe@gmail.com>.
In JIT compiler I created and am going to add in my next commit, I found
that having `mjit_exec` after `vm_loop_start:` is harmful because the
JIT-ed function doesn't proceed other ISeqs on RESTORE_REGS of leave insn.
Executing non-FINISH frame is unexpected for my JIT compiler and
`exception_handler` triggers executions of such ISeqs. So `mjit_exec`
here should be executed only when it directly comes from `vm_exec` call.
`RubyVM::MJIT` module and `.enabled?` method is added so that we can skip
some tests which don't expect JIT threads or compiler file descriptors.
vm_insnhelper.h: trigger MJIT on method calls during VM execution.
vm_core.h: add fields required for mjit.c. `bp` must be `cfp[6]` because
rb_control_frame_struct is likely to be casted to another struct. The
last position is the safest place to add the new field.
vm_insnhelper.c: save initial value of cfp->ep as cfp->bp. This is an
optimization which are done in both MJIT and YARV-MJIT. So this change
is added in this commit. Calculating bp from ep is a little heavy work,
so bp is kind of cache for it.
iseq.c: notify ISeq GC to MJIT. We should know which iseq in MJIT queue
is GCed to avoid SEGV. TODO: unload some GCed units in some safe way.
gc.c: add hooks so that MJIT can wait GC, and vice versa. Simultaneous
JIT and GC executions may cause SEGV and so we should synchronize them.
cont.c: save continuation information in MJIT worker. As MJIT shouldn't
unload JIT-ed code which is being used, MJIT wants to know full list of
saved execution contexts for continuation and detect ISeqs in use.
mjit_compile.c: added empty JIT compiler so that you can reuse this commit
to build your own JIT compiler. This commit tries to compile ISeqs but
all of them are considered as not supported in this commit. So you can't
use JIT compiler in this commit yet while we added --jit option now.
Patch author: Vladimir Makarov <vmakarov@redhat.com>.
Contributors:
Takashi Kokubun <takashikkbn@gmail.com>.
wanabe <s.wanabe@gmail.com>.
Lars Kanis <lars@greiz-reinsdorf.de>.
Part of Feature 12589 and 14235.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62189 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 09:58:09 +03:00
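/* A minimal sketch of the cfp->bp caching described in the commit message
 * above, using an illustrative frame struct; the real
 * rb_control_frame_struct layout lives in vm_core.h and is not reproduced
 * here, and the helper name is hypothetical. */
struct frame_sketch {
    const VALUE *ep;  /* environment pointer, set on every frame push */
    const VALUE *bp;  /* cache of ep's initial value, for JIT-ed code */
};

static inline void
frame_push_bp_sketch(struct frame_sketch *cfp, const VALUE *sp)
{
    cfp->ep = sp;       /* normal frame setup */
    cfp->bp = cfp->ep;  /* recomputing bp from ep is a little heavy,
                         * so bp acts as a cache of its initial value */
}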
|
|
|
rb_native_mutex_lock(&thread_cache_lock);
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_add(&cached_thread_head, &entry.node);
|
2018-02-18 10:54:10 +03:00
|
|
|
|
|
|
|
native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2018-02-18 10:54:10 +03:00
|
|
|
if (entry.th == NULL) { /* unused */
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_del(&entry.node);
|
2018-02-18 10:54:10 +03:00
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_unlock(&thread_cache_lock);
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2018-02-19 02:58:35 +03:00
|
|
|
rb_native_cond_destroy(&entry.cond);
|
2018-07-29 13:15:11 +03:00
|
|
|
if (!entry.th) {
|
|
|
|
RB_ALTSTACK_FREE(entry.altstack);
|
|
|
|
}
|
2018-02-19 02:58:35 +03:00
|
|
|
|
2018-02-18 10:54:10 +03:00
|
|
|
return entry.th;
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
2018-02-18 10:54:10 +03:00
|
|
|
#else
|
|
|
|
# if defined(HAVE_WORKING_FORK)
|
|
|
|
static void thread_cache_reset(void) { }
|
|
|
|
# endif
|
* this commit is a result of refactoring: only renaming functions,
moving definition places, adding/removing prototypes, deleting
unused variables, and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead of yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove the raw_gets() function (unused) and fix some format
mismatches (some format mismatches still remain; this is a TODO).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
static int
|
* blockinlining.c, error.c, eval.c, eval_error.h, eval_intern.h,
eval_jump.h, eval_load.c, eval_safe.h, gc.c, proc.c, signal.c,
thread.c, thread_pthread.ci, thread_win32.ci, vm.c, vm.h,
vm_dump.c, vm_evalbody.ci, yarvcore.c, yarvcore.h:
fix typo (rb_thead_t -> rb_thread_t).
* eval_intern.h: remove unused definitions.
* common.mk: fix around vm_opts.h path
and remove harmful argument passed to insns2vm.rb.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11658 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-08 09:37:46 +03:00
|
|
|
use_cached_thread(rb_thread_t *th)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
|
|
|
#if USE_THREAD_CACHE
|
|
|
|
struct cached_thread_entry *entry;
|
|
|
|
|
2018-02-18 10:54:10 +03:00
|
|
|
rb_native_mutex_lock(&thread_cache_lock);
|
2022-03-30 10:36:31 +03:00
|
|
|
entry = ccan_list_pop(&cached_thread_head, struct cached_thread_entry, node);
|
2018-02-18 10:54:10 +03:00
|
|
|
if (entry) {
|
|
|
|
entry->th = th;
|
2022-04-22 15:19:03 +03:00
|
|
|
/* th->nt->thread_id must be set before signal for Thread#name= */
|
|
|
|
th->nt->thread_id = entry->thread_id;
|
2018-02-18 10:54:10 +03:00
|
|
|
rb_native_cond_signal(&entry->cond);
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
2018-02-18 10:54:10 +03:00
|
|
|
rb_native_mutex_unlock(&thread_cache_lock);
|
|
|
|
return !!entry;
|
2006-12-31 18:02:22 +03:00
|
|
|
#endif
|
2018-02-18 10:54:10 +03:00
|
|
|
return 0;
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
|
2022-04-16 21:40:23 +03:00
|
|
|
#if 0
|
|
|
|
// TODO
|
2018-07-29 13:15:11 +03:00
|
|
|
static void
|
|
|
|
clear_thread_cache_altstack(void)
|
|
|
|
{
|
|
|
|
#if USE_THREAD_CACHE
|
|
|
|
struct cached_thread_entry *entry;
|
|
|
|
|
|
|
|
rb_native_mutex_lock(&thread_cache_lock);
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_for_each(&cached_thread_head, entry, node) {
|
2018-07-29 13:15:11 +03:00
|
|
|
void MAYBE_UNUSED(*altstack) = entry->altstack;
|
|
|
|
entry->altstack = 0;
|
|
|
|
RB_ALTSTACK_FREE(altstack);
|
|
|
|
}
|
|
|
|
rb_native_mutex_unlock(&thread_cache_lock);
|
|
|
|
#endif
|
|
|
|
}
|
2022-04-16 21:40:23 +03:00
|
|
|
#endif
|
2018-07-29 13:15:11 +03:00
|
|
|
|
2023-03-30 21:52:58 +03:00
|
|
|
static struct rb_native_thread *
|
|
|
|
native_thread_alloc(void)
|
|
|
|
{
|
|
|
|
struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
|
|
|
|
#if USE_RUBY_DEBUG_LOG
|
|
|
|
static rb_atomic_t nt_serial = 1;
|
|
|
|
nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
|
|
|
|
#endif
|
|
|
|
return nt;
|
|
|
|
}
|
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
static int
|
2007-02-08 09:37:46 +03:00
|
|
|
native_thread_create(rb_thread_t *th)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
|
2022-04-22 15:19:03 +03:00
|
|
|
VM_ASSERT(th->nt == 0);
|
2023-03-30 21:52:58 +03:00
|
|
|
th->nt = native_thread_alloc();
|
2022-04-22 15:19:03 +03:00
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
if (use_cached_thread(th)) {
|
2022-05-23 21:58:18 +03:00
|
|
|
RUBY_DEBUG_LOG("use cached nt. th:%u", rb_th_serial(th));
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
pthread_attr_t attr;
|
2019-06-19 11:39:58 +03:00
|
|
|
const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
|
2012-12-20 02:29:18 +04:00
|
|
|
const size_t space = space_size(stack_size);
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2020-03-06 15:32:42 +03:00
|
|
|
#ifdef USE_SIGALTSTACK
|
2022-05-24 10:39:45 +03:00
|
|
|
th->nt->altstack = rb_allocate_sigaltstack();
|
2020-03-06 15:32:42 +03:00
|
|
|
#endif
|
2017-10-26 11:32:49 +03:00
|
|
|
th->ec->machine.stack_maxsize = stack_size - space;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2007-02-08 23:24:55 +03:00
|
|
|
CHECK_ERR(pthread_attr_init(&attr));
|
|
|
|
|
2012-06-10 16:51:37 +04:00
|
|
|
# ifdef PTHREAD_STACK_MIN
|
2022-05-23 21:58:18 +03:00
|
|
|
RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
|
|
|
|
CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
|
2012-06-10 16:51:37 +04:00
|
|
|
# endif
|
2007-02-08 23:24:55 +03:00
|
|
|
|
2012-06-10 16:51:37 +04:00
|
|
|
# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
|
2007-02-08 23:24:55 +03:00
|
|
|
CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
|
2012-06-10 16:51:37 +04:00
|
|
|
# endif
|
2007-02-08 23:24:55 +03:00
|
|
|
CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
|
2018-01-09 11:26:38 +03:00
|
|
|
|
2022-04-22 15:19:03 +03:00
|
|
|
err = pthread_create(&th->nt->thread_id, &attr, thread_start_func_1, th);
|
2022-05-23 21:58:18 +03:00
|
|
|
|
|
|
|
RUBY_DEBUG_LOG("th:%u err:%d", rb_th_serial(th), err);
|
|
|
|
|
|
|
|
/* should be done in the created thread */
|
2007-02-08 23:24:55 +03:00
|
|
|
CHECK_ERR(pthread_attr_destroy(&attr));
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-08-13 11:53:35 +04:00
|
|
|
#if USE_NATIVE_THREAD_PRIORITY
|
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
static void
|
2007-02-08 09:37:46 +03:00
|
|
|
native_thread_apply_priority(rb_thread_t *th)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
2007-12-20 13:14:16 +03:00
|
|
|
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
|
2006-12-31 18:02:22 +03:00
|
|
|
struct sched_param sp;
|
|
|
|
int policy;
|
|
|
|
int priority = 0 - th->priority;
|
|
|
|
int max, min;
|
2022-04-22 15:19:03 +03:00
|
|
|
pthread_getschedparam(th->nt->thread_id, &policy, &sp);
|
2006-12-31 18:02:22 +03:00
|
|
|
max = sched_get_priority_max(policy);
|
|
|
|
min = sched_get_priority_min(policy);
|
|
|
|
|
2008-05-19 16:09:14 +04:00
|
|
|
if (min > priority) {
|
2006-12-31 18:02:22 +03:00
|
|
|
priority = min;
|
|
|
|
}
|
2008-05-19 16:09:14 +04:00
|
|
|
else if (max < priority) {
|
|
|
|
priority = max;
|
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
sp.sched_priority = priority;
|
2022-04-22 15:19:03 +03:00
|
|
|
pthread_setschedparam(th->nt->thread_id, policy, &sp);
|
2007-12-20 13:14:16 +03:00
|
|
|
#else
|
|
|
|
/* not touched */
|
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
|
2008-08-13 11:53:35 +04:00
|
|
|
#endif /* USE_NATIVE_THREAD_PRIORITY */
|
|
|
|
|
2011-09-27 04:59:04 +04:00
|
|
|
static int
|
|
|
|
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
|
|
|
|
{
|
|
|
|
return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
|
|
|
|
}
|
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
static void
|
2007-11-20 13:47:53 +03:00
|
|
|
ubf_pthread_cond_signal(void *ptr)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
2007-11-20 13:47:53 +03:00
|
|
|
rb_thread_t *th = (rb_thread_t *)ptr;
|
2022-05-23 21:58:18 +03:00
|
|
|
RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
|
2022-04-22 15:19:03 +03:00
|
|
|
rb_native_cond_signal(&th->nt->cond.intr);
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2018-08-25 09:58:35 +03:00
|
|
|
native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
thread_pthread: prefer rb_nativethread* types/functions
This will make it easier for us to try alternative mutex/condvar
implementations while still using pthreads for thread management.
[Feature #10134]
* thread_pthread.h: define RB_NATIVETHREAD_LOCK_INIT and
RB_NATIVETHREAD_COND_INIT macros
* thread_pthread.c (native_mutex_lock, native_mutex_unlock,
native_mutex_trylock, native_mutex_initialize,
native_mutex_destroy, native_cond_wait):
use rb_nativethread_lock_t instead of pthread_mutex_t
* thread_pthread.c (native_mutex_debug): make argument type-agnostic
to avoid later cast.
* thread_pthread.c (register_cached_thread_and_wait):
replace PTHREAD_COND_INITIALIZER with RB_NATIVETHREAD_COND_INIT,
use native_mutex_{lock,unlock}
* thread_pthread.c (use_cached_thread):
use native_mutex_{lock,unlock}
* thread_pthread.c (native_sleep):
use rb_nativethread_lock_t to match th->interrupt_lock,
use native_mutex_{lock,unlock}
* thread_pthread.c (timer_thread_lock): use rb_nativethread_lock_t type
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@47185 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2014-08-15 04:17:53 +04:00
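/* A minimal usage sketch of the rb_nativethread* abstraction preferred
 * above, assuming only the RB_NATIVETHREAD_LOCK_INIT macro and the
 * native mutex wrappers named in the commit message; the counter and
 * its lock are illustrative, not code from this file. */
static rb_nativethread_lock_t counter_lock = RB_NATIVETHREAD_LOCK_INIT;
static int counter;

static void
counter_increment_sketch(void)
{
    rb_native_mutex_lock(&counter_lock);   /* callers stay unchanged even if
                                            * the lock implementation swaps */
    counter++;
    rb_native_mutex_unlock(&counter_lock);
}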
|
|
|
rb_nativethread_lock_t *lock = &th->interrupt_lock;
|
2022-04-22 15:19:03 +03:00
|
|
|
rb_nativethread_cond_t *cond = &th->nt->cond.intr;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2018-08-25 09:58:35 +03:00
|
|
|
/* Solaris cond_timedwait() returns EINVAL if an argument is greater than
|
|
|
|
* current_time + 100,000,000. So we cut it down to 100,000,000. This is
|
|
|
|
* considered a kind of spurious wakeup. The caller of native_sleep
|
|
|
|
* should handle spurious wakeups.
|
|
|
|
*
|
|
|
|
* See also [Bug #1341] [ruby-core:29702]
|
|
|
|
* http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
|
|
|
|
*/
|
|
|
|
const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
|
* compile.c, dir.c, eval.c, eval_jump.h, eval_method.h, numeric.c,
pack.c, parse.y, re.c, thread.c, vm.c, vm_dump.c, call_cfunc.ci,
thread_pthread.ci, thread_win32.ci: fixed indentation.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12431 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-06-05 08:25:10 +04:00
|
|
|
|
2022-04-16 21:40:23 +03:00
|
|
|
THREAD_BLOCKING_BEGIN(th);
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_lock(lock);
|
2008-05-30 05:52:38 +04:00
|
|
|
th->unblock.func = ubf_pthread_cond_signal;
|
|
|
|
th->unblock.arg = th;
|
2007-06-05 08:49:54 +04:00
|
|
|
|
2017-11-06 10:44:28 +03:00
|
|
|
if (RUBY_VM_INTERRUPTED(th->ec)) {
|
2006-12-31 18:02:22 +03:00
|
|
|
/* interrupted. return immediately */
|
2022-05-23 21:58:18 +03:00
|
|
|
RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
else {
|
2018-08-25 09:58:35 +03:00
|
|
|
if (!rel) {
|
2018-02-07 04:57:14 +03:00
|
|
|
rb_native_cond_wait(cond, lock);
|
2018-08-25 09:58:35 +03:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
rb_hrtime_t end;
|
|
|
|
|
|
|
|
if (*rel > max) {
|
|
|
|
*rel = max;
|
|
|
|
}
|
|
|
|
|
|
|
|
end = native_cond_timeout(cond, *rel);
|
2020-03-09 20:22:11 +03:00
|
|
|
native_cond_timedwait(cond, lock, &end);
|
2018-08-25 09:58:35 +03:00
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
2008-05-30 05:52:38 +04:00
|
|
|
th->unblock.func = 0;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_unlock(lock);
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
2022-04-16 21:40:23 +03:00
|
|
|
THREAD_BLOCKING_END(th);
|
2007-12-25 07:16:06 +03:00
|
|
|
|
2022-05-23 21:58:18 +03:00
|
|
|
RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
|
2015-11-30 23:33:20 +03:00
|
|
|
#ifdef USE_UBF_LIST
|
2022-03-30 10:36:31 +03:00
|
|
|
static CCAN_LIST_HEAD(ubf_list_head);
|
2018-08-21 00:34:39 +03:00
|
|
|
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
|
|
|
|
|
|
|
|
static void
|
|
|
|
ubf_list_atfork(void)
|
|
|
|
{
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_head_init(&ubf_list_head);
|
2018-08-21 00:34:39 +03:00
|
|
|
rb_native_mutex_initialize(&ubf_list_lock);
|
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2015-11-30 23:33:20 +03:00
|
|
|
/* The thread 'th' is registered as trying to unblock. */
|
2006-12-31 18:02:22 +03:00
|
|
|
static void
|
2015-11-30 23:33:20 +03:00
|
|
|
register_ubf_list(rb_thread_t *th)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
2022-04-22 15:19:03 +03:00
|
|
|
struct ccan_list_node *node = &th->sched.node.ubf;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2022-03-30 10:36:31 +03:00
|
|
|
if (ccan_list_empty((struct ccan_list_head*)node)) {
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_lock(&ubf_list_lock);
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_add(&ubf_list_head, node);
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_unlock(&ubf_list_lock);
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-30 23:33:20 +03:00
|
|
|
/* The thread 'th' is unblocked. It no longer needs to be registered. */
|
2006-12-31 18:02:22 +03:00
|
|
|
static void
|
2015-11-30 23:33:20 +03:00
|
|
|
unregister_ubf_list(rb_thread_t *th)
|
2006-12-31 18:02:22 +03:00
|
|
|
{
|
2022-04-22 15:19:03 +03:00
|
|
|
struct ccan_list_node *node = &th->sched.node.ubf;
|
2015-11-30 23:33:20 +03:00
|
|
|
|
2018-07-31 00:48:32 +03:00
|
|
|
/* we can't allow re-entry into ubf_list_head */
|
|
|
|
VM_ASSERT(th->unblock.func == 0);
|
|
|
|
|
2022-03-30 10:36:31 +03:00
|
|
|
if (!ccan_list_empty((struct ccan_list_head*)node)) {
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_lock(&ubf_list_lock);
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_del_init(node);
|
|
|
|
if (ccan_list_empty(&ubf_list_head) && !rb_signal_buff_size()) {
|
2018-08-15 07:32:46 +03:00
|
|
|
ubf_timer_disarm();
|
2018-08-14 00:34:24 +03:00
|
|
|
}
|
2018-02-04 09:58:09 +03:00
|
|
|
rb_native_mutex_unlock(&ubf_list_lock);
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
2011-05-08 05:50:36 +04:00
|
|
|
}
|
|
|
|
|
2015-11-30 23:33:20 +03:00
|
|
|
/*
|
|
|
|
* send a signal so that a target thread returns from a blocking syscall.
|
|
|
|
* Maybe any signal is ok, but we chose SIGVTALRM.
|
|
|
|
*/
|
2011-05-08 05:50:36 +04:00
|
|
|
static void
|
2015-11-30 23:33:20 +03:00
|
|
|
ubf_wakeup_thread(rb_thread_t *th)
|
2011-05-08 05:50:36 +04:00
|
|
|
{
|
2022-05-23 21:58:18 +03:00
|
|
|
RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
|
2022-04-22 15:19:03 +03:00
|
|
|
pthread_kill(th->nt->thread_id, SIGVTALRM);
|
2006-12-31 18:02:22 +03:00
|
|
|
}
|
|
|
|
|
2011-05-08 05:50:36 +04:00
|
|
|
static void
|
|
|
|
ubf_select(void *ptr)
|
|
|
|
{
|
|
|
|
rb_thread_t *th = (rb_thread_t *)ptr;
|
2022-04-16 21:40:23 +03:00
|
|
|
struct rb_thread_sched *sched = TH_SCHED(th);
|
2018-08-28 02:29:44 +03:00
|
|
|
const rb_thread_t *cur = ruby_thread_from_native(); /* may be 0 */
|
2018-08-14 00:34:20 +03:00
|
|
|
|
thread_pthread: remove timer-thread by restructuring GVL
To reduce resource use and CI failures, remove the
timer-thread. Single-threaded Ruby processes (including forked
children) will never see extra thread overhead. This prevents
glibc and jemalloc from going into multi-threaded mode and
initializing locks or causing fragmentation via arena explosion.
The GVL implements its own wait-queue as a ccan/list to
permit controlling wakeup order. Timeslice under contention is
handled by a designated timer thread (similar to choosing a
"patrol_thread" for current deadlock checking).
There is only one self-pipe, now, as wakeups for timeslice are
done independently using condition variables. This reduces FD
pressure slightly.
Signal handling is handled directly by a Ruby Thread (instead
of timer-thread) by exposing signal self-pipe to callers of
rb_thread_fd_select, native_sleep, rb_wait_for_single_fd, etc...
Acquiring, using, and releasing the self-pipe is exposed via 4
new internal functions:
1) rb_sigwait_fd_get - exclusively acquire timer_thread_pipe.normal[0]
2) rb_sigwait_fd_sleep - sleep and wait for signal (and no other FDs)
3) rb_sigwait_fd_put - release acquired result from rb_sigwait_fd_get
4) rb_sigwait_fd_migrate - migrate signal handling to another thread
after calling rb_sigwait_fd_put.
rb_sigwait_fd_migrate is necessary for waitpid callers because
only one thread can wait on self-pipe at a time, otherwise a
deadlock will occur if threads fight over the self-pipe.
TRAP_INTERRUPT_MASK is now set for the main thread directly in
signal handler via rb_thread_wakeup_timer_thread.
Originally, I wanted to use POSIX timers
(timer_create/timer_settime) for this. Unfortunately, this
proved unfeasible as Mutex#sleep resumes on spurious wakeups and
test/thread/test_cv.rb::test_condvar_timed_wait failed. Using
pthread_sigmask to mask out SIGVTALRM fixed that test, but
test/fiddle/test_function.rb::test_nogvl_poll proved there'd be
some unavoidable (and frequent) incompatibilities from that
approach.
Finally, this allows us to drop thread_destruct_lock and
interrupt current ec directly.
We don't need to rely on vm->thread_destruct_lock or a coherent
vm->running_thread on any platform. Separate timer-thread for
time slice and signal handling is relegated to thread_win32.c,
now.
[ruby-core:88088] [Misc #14937]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@64107 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-07-29 23:47:33 +03:00
|
|
|
    register_ubf_list(th);

    /*
     * ubf_wakeup_thread() doesn't guarantee to wake up a target thread.
     * Therefore, we repeatedly call ubf_wakeup_thread() until the target
     * thread exits from the ubf function. We need a timer to perform
     * this operation.
     * We use double-checked locking here because this function may be
     * called while vm->gvl.lock is held in do_gvl_timer.
     * There is also no need to start a timer if we're the designated
     * sigwait_th thread; otherwise we can deadlock with a thread
     * in unblock_function_clear.
     */
    if (cur != sched->timer && cur != sigwait_th) {
        /*
         * Double-checked locking above was to prevent nested locking
         * by the SAME thread. We use trylock here to prevent deadlocks
         * between DIFFERENT threads.
         */
        if (rb_native_mutex_trylock(&sched->lock) == 0) {
            if (!sched->timer) {
                rb_thread_wakeup_timer_thread(-1);
            }
            rb_native_mutex_unlock(&sched->lock);
        }
    }

    ubf_wakeup_thread(th);
}
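
/*
 * true if no threads are registered on ubf_list, i.e. nobody is
 * waiting on a blocking region that needs periodic wakeups
 */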
static int
ubf_threads_empty(void)
{
    return ccan_list_empty(&ubf_list_head);
}
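
/*
 * signal every thread registered on ubf_list so each returns from its
 * blocking syscall and re-checks its interrupt flags
 */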
static void
ubf_wakeup_all_threads(void)
{
    if (!ubf_threads_empty()) {
|
mjit.c: merge MJIT infrastructure
that allows to JIT-compile Ruby methods by generating C code and
using C compiler. See the first comment of mjit.c to know what this
file does.
mjit.c is authored by Vladimir Makarov <vmakarov@redhat.com>.
After he invented great method JIT infrastructure for MRI as MJIT,
Lars Kanis <lars@greiz-reinsdorf.de> sent the patch to support MinGW
in MJIT. In addition to merging it, I ported pthread to Windows native
threads. Now this MJIT infrastructure can be compiled on Visual Studio.
This commit simplifies mjit.c to decrease code at initial merge. For
example, this commit does not provide multiple JIT threads support.
We can resurrect them later if we really want them, but I wanted to minimize
diff to make it easier to review this patch.
`/tmp/_mjitXXX` file is renamed to `/tmp/_ruby_mjitXXX` because non-Ruby
developers may not know the name "mjit" and the file name should make
sure it's from Ruby and not from some harmful programs. TODO: it may be
better to store this to some temporary directory which Ruby is already using
by Tempfile, if it's not bad for performance.
mjit.h: New. It has `mjit_exec` interface similar to `vm_exec`, which is
for triggering MJIT. This drops interface for AOT compared to the original
MJIT.
Makefile.in: define macros to let MJIT know the path of MJIT header.
Probably we can refactor this to reduce the number of macros (TODO).
win32/Makefile.sub: ditto.
common.mk: compile mjit.o and mjit_compile.o. Unlike original MJIT, this
commit separates MJIT infrastructure and JIT compiler code as independent
object files. As initial patch is NOT going to have ultra-fast JIT compiler,
it's likely to replace JIT compiler, e.g. original MJIT's compiler or some
future JIT impelementations which are not public now.
inits.c: define MJIT module. This is added because `MJIT.enabled?` was
necessary for testing.
test/lib/zombie_hunter.rb: skip if `MJIT.enabled?`. Obviously this
wouldn't work with current code when JIT is enabled.
test/ruby/test_io.rb: skip this too. This would make no sense with MJIT.
ruby.c: define MJIT CLI options. As major difference from original MJIT,
"-j:l"/"--jit:llvm" are renamed to "--jit-cc" because I want to support
not only gcc/clang but also cl.exe (Visual Studio) in the future. But it
takes only "--jit-cc=gcc", "--jit-cc=clang" for now. And only long "--jit"
options are allowed since some Ruby committers preferred it at Ruby
developers Meeting on January, and some of options are renamed.
This file also triggers to initialize MJIT thread and variables.
eval.c: finalize MJIT worker thread and variables.
test/ruby/test_rubyoptions.rb: fix number of CLI options for --jit.
thread_pthread.c: change for pthread abstraction in MJIT. Prefix rb_ for
functions which are used by other files.
thread_win32.c: ditto, for Windows. Those pthread porting is one of major
works that YARV-MJIT created, which is my fork of MJIT, in Feature 14235.
thread.c: follow rb_ prefix changes
vm.c: trigger MJIT call on VM invocation. Also trigger `mjit_mark` to avoid
SEGV by race between JIT and GC of ISeq. The improvement was provided by
wanabe <s.wanabe@gmail.com>.
In JIT compiler I created and am going to add in my next commit, I found
that having `mjit_exec` after `vm_loop_start:` is harmful because the
JIT-ed function doesn't proceed other ISeqs on RESTORE_REGS of leave insn.
Executing non-FINISH frame is unexpected for my JIT compiler and
`exception_handler` triggers executions of such ISeqs. So `mjit_exec`
here should be executed only when it directly comes from `vm_exec` call.
`RubyVM::MJIT` module and `.enabled?` method is added so that we can skip
some tests which don't expect JIT threads or compiler file descriptors.
vm_insnhelper.h: trigger MJIT on method calls during VM execution.
vm_core.h: add fields required for mjit.c. `bp` must be `cfp[6]` because
rb_control_frame_struct is likely to be casted to another struct. The
last position is the safest place to add the new field.
vm_insnhelper.c: save initial value of cfp->ep as cfp->bp. This is an
optimization which are done in both MJIT and YARV-MJIT. So this change
is added in this commit. Calculating bp from ep is a little heavy work,
so bp is kind of cache for it.
iseq.c: notify ISeq GC to MJIT. We should know which iseq in MJIT queue
is GCed to avoid SEGV. TODO: unload some GCed units in some safe way.
gc.c: add hooks so that MJIT can wait GC, and vice versa. Simultaneous
JIT and GC executions may cause SEGV and so we should synchronize them.
cont.c: save continuation information in MJIT worker. As MJIT shouldn't
unload JIT-ed code which is being used, MJIT wants to know full list of
saved execution contexts for continuation and detect ISeqs in use.
mjit_compile.c: added empty JIT compiler so that you can reuse this commit
to build your own JIT compiler. This commit tries to compile ISeqs but
all of them are considered as not supported in this commit. So you can't
use JIT compiler in this commit yet while we added --jit option now.
Patch author: Vladimir Makarov <vmakarov@redhat.com>.
Contributors:
Takashi Kokubun <takashikkbn@gmail.com>.
wanabe <s.wanabe@gmail.com>.
Lars Kanis <lars@greiz-reinsdorf.de>.
Part of Feature 12589 and 14235.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62189 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 09:58:09 +03:00
|
|
|
        rb_native_mutex_lock(&ubf_list_lock);
        rb_thread_t *th;

        ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
            ubf_wakeup_thread(th);
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}

#else /* USE_UBF_LIST */
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0
static void ubf_wakeup_all_threads(void) { return; }
static int ubf_threads_empty(void) { return 1; }
#define ubf_list_atfork() do {} while (0)
#endif /* USE_UBF_LIST */

#define TT_DEBUG 0
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

static struct {
    /* pipes are closed in forked children when owner_process does not match */
    int normal[2]; /* [0] == sigwait_fd */
    int ub_main[2]; /* unblock main thread from native_ppoll_sleep */

    /* volatile for signal handler use: */
    volatile rb_serial_t fork_gen;
} signal_self_pipe = {
    {-1, -1},
    {-1, -1},
};

/* only use signal-safe system calls here */
static void
rb_thread_wakeup_timer_thread_fd(int fd)
{
#if USE_EVENTFD
    const uint64_t buff = 1;
#else
    const char buff = '!';
#endif
    ssize_t result;

    /* already opened */
    if (fd >= 0) {
      retry:
        if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
            int e = errno;
            switch (e) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        /* ignore wakeup */
    }
}

/*
 * This ensures we get a SIGVTALRM in TIME_QUANTUM_MSEC if our
 * process could not react to the original signal in time.
 */
static void
ubf_timer_arm(rb_serial_t fork_gen) /* async signal safe */
{
#if UBF_TIMER == UBF_TIMER_POSIX
    if ((!fork_gen || timer_posix.fork_gen == fork_gen) &&
        timer_state_cas(RTIMER_DISARM, RTIMER_ARMING) == RTIMER_DISARM) {
        struct itimerspec it;

        it.it_interval.tv_sec = it.it_value.tv_sec = 0;
        it.it_interval.tv_nsec = it.it_value.tv_nsec = TIME_QUANTUM_NSEC;

        if (timer_settime(timer_posix.timerid, 0, &it, 0))
            rb_async_bug_errno("timer_settime (arm)", errno);

        switch (timer_state_cas(RTIMER_ARMING, RTIMER_ARMED)) {
          case RTIMER_DISARM:
            /* somebody requested a disarm while we were arming */
            /* may race harmlessly with ubf_timer_destroy */
            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);

          case RTIMER_ARMING: return; /* success */
          case RTIMER_ARMED:
            /*
             * it is possible for another thread to disarm, and for a
             * third thread to finish re-arming, before we get here;
             * we wasted a syscall with timer_settime, but that is
             * probably unavoidable in a signal handler.
             */
            return;
          case RTIMER_DEAD:
            /* may race harmlessly with ubf_timer_destroy */
            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
            return;
          default:
            rb_async_bug_errno("UBF_TIMER_POSIX unknown state", ERANGE);
        }
    }
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    if (!fork_gen || fork_gen == timer_pthread.fork_gen) {
        if (ATOMIC_EXCHANGE(timer_pthread.armed, 1) == 0)
            rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    }
#endif
}
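
/*
 * Wake the sleeping sigwait thread via the signal self-pipe.
 * sig <= 0 means we are outside a signal handler: 0 only writes to the
 * pipe, while a negative value also arms the UBF timer.  sig > 0 is the
 * async-signal-safe path used from the signal handler itself.
 */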
void
rb_thread_wakeup_timer_thread(int sig)
{
    /* non-sighandler path */
    if (sig <= 0) {
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
        if (sig < 0) {
            ubf_timer_arm(0);
        }
        return;
    }

    /* must be safe inside sighandler, so no mutex */
    if (signal_self_pipe.fork_gen == current_fork_gen) {
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);

        /*
         * system_working check is required because vm and main_thread are
         * freed during shutdown
         */
        if (system_working > 0) {
            volatile rb_execution_context_t *ec;
            rb_vm_t *vm = GET_VM();
            rb_thread_t *mth;

            /*
             * FIXME: root VM and main_thread should be static and not
             * on heap for maximum safety (and startup/shutdown speed)
             */
            if (!vm) return;
            mth = vm->ractor.main_thread;
            if (!mth || system_working <= 0) return;

            /* this relies on GC for grace period before cont_free */
            ec = ACCESS_ONCE(rb_execution_context_t *, mth->ec);

            if (ec) {
                RUBY_VM_SET_TRAP_INTERRUPT(ec);
                ubf_timer_arm(current_fork_gen);

                /* some ubfs can interrupt single-threaded process directly */
                if (vm->ubf_async_safe && mth->unblock.func) {
                    (mth->unblock.func)(mth->unblock.arg);
                }
            }
        }
    }
}

#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)
static void
close_invalidate(int *fdp, const char *msg)
{
    int fd = *fdp;

    *fdp = -1;
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
    }
}
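
/*
 * close both ends of a communication pipe; with eventfd, both "ends"
 * are the same descriptor and must only be closed once
 */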
static void
close_invalidate_pair(int fds[2], const char *msg)
{
    if (USE_EVENTFD && fds[0] == fds[1]) {
        close_invalidate(&fds[0], msg);
        fds[1] = -1;
    }
    else {
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[1], msg);
    }
}

static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}

/* communication pipe with timer thread and signal handler */
static int
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] >= 0 || pipes[1] >= 0) {
        VM_ASSERT(pipes[0] >= 0);
        VM_ASSERT(pipes[1] >= 0);
        return 0;
    }

    /*
     * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26 which were
     * missing EFD_* flags; they can fall back to pipe
     */
#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
    if (pipes[0] >= 0) {
        rb_update_max_fd(pipes[0]);
        return 0;
    }
#endif

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_warn("pipe creation failed for timer: %s, scheduling broken",
                strerror(errno));
        return -1;
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
    return 0;
}

#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#endif

enum {
    THREAD_NAME_MAX =
#if defined(__linux__)
    16
#elif defined(__APPLE__)
/* Undocumented, and main thread seems unlimited */
    64
#else
    16
#endif
};

static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
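
/*
 * name the current thread after th->name or, failing that, after the
 * "file:line" where its block was defined; names that exceed
 * THREAD_NAME_MAX are truncated with a trailing '*'
 */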
static void
native_set_thread_name(rb_thread_t *th)
{
#ifdef SET_CURRENT_THREAD_NAME
    VALUE loc;
    if (!NIL_P(loc = th->name)) {
        SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
    }
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char *name, *p;
        char buf[THREAD_NAME_MAX];
        size_t len;
        int n;

        name = RSTRING_PTR(RARRAY_AREF(loc, 0));
        p = strrchr(name, '/'); /* show only the basename of the path. */
        if (p && p[1])
            name = p + 1;

        n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
        RB_GC_GUARD(loc);

        len = (size_t)n;
        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        }
        SET_CURRENT_THREAD_NAME(buf);
    }
#endif
}

static void
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
{
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
    const char *s = "";
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
# endif
    if (!NIL_P(name)) {
        long n;
        RSTRING_GETMEM(name, s, n);
        if (n >= (int)sizeof(buf)) {
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
            s = buf;
        }
    }
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
# endif
#endif
}
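
/*
 * return the OS-level thread ID backing Thread#native_thread_id:
 * the cached kernel TID where the platform exposes one, or the 64-bit
 * ID from pthread_threadid_np() (falling back to the Mach thread port)
 * on macOS
 */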
#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
static VALUE
native_thread_native_thread_id(rb_thread_t *target_th)
{
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->nt->tid;
    if (tid == 0) return Qnil;
    return INT2FIX(tid);
#elif defined(__APPLE__)
    uint64_t tid;
# if (!defined(MAC_OS_X_VERSION_10_6) || \
      (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
      defined(__POWERPC__) /* never defined for PowerPC platforms */)
    const bool no_pthread_threadid_np = true;
#   define NO_PTHREAD_MACH_THREAD_NP 1
# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
    const bool no_pthread_threadid_np = false;
# else
#   if !(defined(__has_attribute) && __has_attribute(availability))
    /* __API_AVAILABLE macro does nothing on gcc */
    __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
#   endif
    /* Check weakly linked symbol */
    const bool no_pthread_threadid_np = !&pthread_threadid_np;
# endif
    if (no_pthread_threadid_np) {
        return ULL2NUM(pthread_mach_thread_np(pthread_self()));
    }
# ifndef NO_PTHREAD_MACH_THREAD_NP
    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
    if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
    return ULL2NUM((unsigned long long)tid);
# endif
#endif
}
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
#else
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
#endif

static void
ubf_timer_invalidate(void)
{
#if UBF_TIMER == UBF_TIMER_PTHREAD
    CLOSE_INVALIDATE_PAIR(timer_pthread.low);
#endif
}

static void
ubf_timer_pthread_create(rb_serial_t fork_gen)
{
#if UBF_TIMER == UBF_TIMER_PTHREAD
    int err;
    if (timer_pthread.fork_gen == fork_gen)
        return;

    if (setup_communication_pipe_internal(timer_pthread.low) < 0)
        return;

    err = pthread_create(&timer_pthread.thid, 0, timer_pthread_fn, GET_VM());
    if (!err)
        timer_pthread.fork_gen = fork_gen;
    else
        rb_warn("pthread_create failed for timer: %s, signals racy",
                strerror(err));
#endif
}
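
/*
 * create the UBF timer for this process generation: a POSIX timer
 * delivering SIGVTALRM where available, otherwise a dedicated pthread
 * that signals the main thread
 */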
static void
ubf_timer_create(rb_serial_t fork_gen)
{
#if UBF_TIMER == UBF_TIMER_POSIX
# if defined(__sun)
#  define UBF_TIMER_CLOCK CLOCK_REALTIME
# else /* Tested Linux and FreeBSD: */
#  define UBF_TIMER_CLOCK CLOCK_MONOTONIC
# endif

    struct sigevent sev;

    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGVTALRM;
    sev.sigev_value.sival_ptr = &timer_posix;

    if (!timer_create(UBF_TIMER_CLOCK, &sev, &timer_posix.timerid)) {
        rb_atomic_t prev = timer_state_exchange(RTIMER_DISARM);

        if (prev != RTIMER_DEAD) {
            rb_bug("timer_posix was not dead: %u\n", (unsigned)prev);
        }
        timer_posix.fork_gen = fork_gen;
    }
    else {
        rb_warn("timer_create failed: %s, signals racy", strerror(errno));
    }
#endif
    if (UBF_TIMER == UBF_TIMER_PTHREAD)
        ubf_timer_pthread_create(fork_gen);
}

static void
rb_thread_create_timer_thread(void)
{
    /* we only create the pipe, and lazy-spawn */
    rb_serial_t fork_gen = signal_self_pipe.fork_gen;

    if (fork_gen && fork_gen != current_fork_gen) {
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
        ubf_timer_invalidate();
    }

    if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
    if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;

    ubf_timer_create(current_fork_gen);
    if (fork_gen != current_fork_gen) {
        /* validate pipe on this process */
        sigwait_th = THREAD_INVALID;
        signal_self_pipe.fork_gen = current_fork_gen;
    }
}
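
/*
 * stop the periodic UBF timer, transitioning the state machine from
 * RTIMER_ARMED back to RTIMER_DISARM while tolerating concurrent
 * arming and destruction
 */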
static void
ubf_timer_disarm(void)
{
#if UBF_TIMER == UBF_TIMER_POSIX
    rb_atomic_t prev;

    if (timer_posix.fork_gen && timer_posix.fork_gen != current_fork_gen) return;
    prev = timer_state_cas(RTIMER_ARMED, RTIMER_DISARM);
    switch (prev) {
      case RTIMER_DISARM: return; /* likely */
      case RTIMER_ARMING: return; /* ubf_timer_arm will disarm itself */
      case RTIMER_ARMED:
        if (timer_settime(timer_posix.timerid, 0, &zero, 0)) {
            int err = errno;

            if (err == EINVAL) {
                prev = timer_state_cas(RTIMER_DISARM, RTIMER_DISARM);

                /* main thread may have killed the timer */
                if (prev == RTIMER_DEAD) return;

                rb_bug_errno("timer_settime (disarm)", err);
            }
        }
        return;
      case RTIMER_DEAD: return; /* stay dead */
      default:
        rb_bug("UBF_TIMER_POSIX bad state: %u\n", (unsigned)prev);
    }

#elif UBF_TIMER == UBF_TIMER_PTHREAD
    ATOMIC_SET(timer_pthread.armed, 0);
#endif
}
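
/*
 * tear down the UBF timer at shutdown: spin until no other thread is
 * mid-arm, mark the state machine RTIMER_DEAD, then delete the POSIX
 * timer (or join the timer pthread in the fallback implementation)
 */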
static void
ubf_timer_destroy(void)
{
#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.fork_gen == current_fork_gen) {
        rb_atomic_t expect = RTIMER_DISARM;
        size_t i, max = 10000000;

        /* prevent signal handler from arming: */
        for (i = 0; i < max; i++) {
            switch (timer_state_cas(expect, RTIMER_DEAD)) {
              case RTIMER_DISARM:
                if (expect == RTIMER_DISARM) goto done;
                expect = RTIMER_DISARM;
                break;
              case RTIMER_ARMING:
                native_thread_yield(); /* let another thread finish arming */
                expect = RTIMER_ARMED;
                break;
              case RTIMER_ARMED:
                if (expect == RTIMER_ARMED) {
                    if (timer_settime(timer_posix.timerid, 0, &zero, 0))
                        rb_bug_errno("timer_settime (destroy)", errno);
                    goto done;
                }
                expect = RTIMER_ARMED;
                break;
              case RTIMER_DEAD:
                rb_bug("RTIMER_DEAD unexpected");
            }
        }
        rb_bug("timed out waiting for timer to arm");
      done:
        if (timer_delete(timer_posix.timerid) < 0)
            rb_sys_fail("timer_delete");

        VM_ASSERT(timer_state_exchange(RTIMER_DEAD) == RTIMER_DEAD);
    }
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    int err;

    timer_pthread.fork_gen = 0;
    ubf_timer_disarm();
    rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    err = pthread_join(timer_pthread.thid, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
#endif
}

static int
native_stop_timer_thread(void)
{
    int stopped;
    stopped = --system_working <= 0;
    if (stopped)
        ubf_timer_destroy();

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}
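
/*
 * report whether addr (typically a fault address from a segfault
 * handler) falls inside the watermark region at the end of the
 * thread's machine stack, i.e. whether it looks like a stack overflow
 */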
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
# ifdef __APPLE__
        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
            struct rlimit rlim;
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
            }
        }
# endif
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
    }
    else
#endif
    if (th) {
        size = th->ec->machine.stack_maxsize;
        base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
    }
    else {
        return 0;
    }
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif

int
rb_reserved_fd_p(int fd)
{
    /* no false-positive if out-of-FD at startup */
    if (fd < 0)
        return 0;

#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (fd == timer_pthread.low[0] || fd == timer_pthread.low[1])
        goto check_fork_gen;
#endif
    if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])
        goto check_fork_gen;
    if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])
        goto check_fork_gen;
    return 0;
  check_fork_gen:
    if (signal_self_pipe.fork_gen == current_fork_gen) /* async-signal-safe */
        return 1;
    return 0;
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}
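
/*
 * try to become the designated signal-waiting thread: returns the read
 * end of the signal self-pipe on success, or -1 if another thread
 * already owns it
 */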
int
rb_sigwait_fd_get(const rb_thread_t *th)
{
    if (signal_self_pipe.normal[0] >= 0) {
        VM_ASSERT(signal_self_pipe.fork_gen == current_fork_gen);
        /*
         * no need to keep firing the timer if any thread is sleeping
         * on the signal self-pipe
         */
        ubf_timer_disarm();

        if (ATOMIC_PTR_CAS(sigwait_th, THREAD_INVALID, th) == THREAD_INVALID) {
            return signal_self_pipe.normal[0];
        }
    }
    return -1; /* avoid thundering herd and work stealing/starvation */
}

void
rb_sigwait_fd_put(const rb_thread_t *th, int fd)
{
    const rb_thread_t *old;

    VM_ASSERT(signal_self_pipe.normal[0] == fd);
    old = ATOMIC_PTR_EXCHANGE(sigwait_th, THREAD_INVALID);
    if (old != th) assert(old == th);
}
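
/*
 * ppoll(2) emulation via plain poll(2) for platforms lacking the real
 * thing; the timespec is converted to milliseconds (rounding
 * sub-millisecond sleeps up to 1ms) and the signal mask is ignored
 */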
#ifndef HAVE_PPOLL
/* TODO: don't ignore sigmask */
static int
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)
{
    int timeout_ms;

    if (ts) {
        int tmp, tmp2;

        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
        else {
            tmp = (int)(ts->tv_sec * 1000);
            /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
            else
                timeout_ms = (int)(tmp + tmp2);
        }
    }
    else
        timeout_ms = -1;

    return poll(fds, nfds, timeout_ms);
}
# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
#endif
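
/*
 * sleep on sigwait_fd for up to *rel (forever if rel is NULL),
 * returning early when a signal is consumed, the thread is
 * interrupted, or the deadline expires
 */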
void
rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *rel)
{
    struct pollfd pfd;
    struct timespec ts;

    pfd.fd = sigwait_fd;
    pfd.events = POLLIN;

    if (!BUSY_WAIT_SIGNALS && ubf_threads_empty()) {
        (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
        check_signals_nogvl(th, sigwait_fd);
    }
    else {
        rb_hrtime_t to = RB_HRTIME_MAX, end = 0;
        int n = 0;

        if (rel) {
            to = *rel;
            end = rb_hrtime_add(rb_hrtime_now(), to);
        }
        /*
         * tricky: this needs to return on spurious wakeup (no auto-retry).
         * But we also need to distinguish between periodic quantum
         * wakeups, so we care about the result of consume_communication_pipe
         *
         * We want to avoid spurious wakeup for Mutex#sleep compatibility
         * [ruby-core:88102]
         */
        for (;;) {
            const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &to, &n);

            if (n) return;
            n = ppoll(&pfd, 1, rb_hrtime2timespec(&ts, sto), 0);
            if (check_signals_nogvl(th, sigwait_fd))
                return;
            if (n || (th && RUBY_VM_INTERRUPTED(th->ec)))
                return;
            if (rel && hrtime_update_expire(&to, end))
                return;
        }
    }
}

/*
 * we need to guarantee wakeups from native_ppoll_sleep because
 * ubf_select may not be going through ubf_list if other threads
 * are all sleeping.
 */
static void
ubf_ppoll_sleep(void *ignore)
{
    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);
}

/*
 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
 * since threads may be too starved to enter the GVL waitqueue for
 * us to detect contention. Instead, we want to kick other threads
 * so they can run and possibly prevent us from entering slow paths
 * in ppoll() or similar syscalls.
 *
 * Confirmed on FreeBSD 11.2 and Linux 4.19.
 * [ruby-core:90417] [Bug #15398]
 */
#define THREAD_BLOCKING_YIELD(th) do { \
    const rb_thread_t *next; \
    struct rb_thread_sched *sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    rb_native_mutex_lock(&sched->lock); \
    next = thread_sched_to_waiting_common((sched), (th)); \
    rb_native_mutex_unlock(&sched->lock); \
    if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \
    }

/*
 * This function does not exclusively acquire sigwait_fd, so it
 * cannot safely read from it. However, it can be woken up in
 * 4 ways:
 *
 * 1) ubf_ppoll_sleep (from another thread)
 * 2) rb_thread_wakeup_timer_thread (from signal handler)
 * 3) any unmasked signal hitting the process
 * 4) periodic ubf timer wakeups (after 3)
 */
static void
native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = ubf_ppoll_sleep;
    rb_native_mutex_unlock(&th->interrupt_lock);

    THREAD_BLOCKING_YIELD(th);
    {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            struct pollfd pfd[2];
            struct timespec ts;

            pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
            pfd[1].fd = signal_self_pipe.ub_main[0];
            pfd[0].events = pfd[1].events = POLLIN;
            if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
                if (pfd[1].revents & POLLIN) {
                    (void)consume_communication_pipe(pfd[1].fd);
                }
            }
            /*
             * Do not read from the sigwait_fd here; leave that to
             * uplevel callers or other threads.  Otherwise we may
             * steal wakeups and starve other threads.
             */
        }
        unblock_function_clear(th);
    }
    THREAD_BLOCKING_END(th);
}
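
/*
 * sleep the current thread for up to *rel (forever if rel is NULL),
 * picking the cheapest mechanism that can still observe signals:
 * the sigwait fd if we own it, ppoll for the main thread, or a plain
 * condition variable otherwise
 */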
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    int sigwait_fd = rb_sigwait_fd_get(th);
    rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);

    if (sigwait_fd >= 0) {
        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_sigwait;
        rb_native_mutex_unlock(&th->interrupt_lock);

        THREAD_BLOCKING_YIELD(th);
        {
            if (!RUBY_VM_INTERRUPTED(th->ec)) {
                rb_sigwait_sleep(th, sigwait_fd, rel);
            }
            else {
                check_signals_nogvl(th, sigwait_fd);
            }
            unblock_function_clear(th);
        }
        THREAD_BLOCKING_END(th);

        rb_sigwait_fd_put(th, sigwait_fd);
    }
    else if (th == th->vm->ractor.main_thread) { /* always able to handle signals */
        native_ppoll_sleep(th, rel);
    }
    else {
        native_cond_sleep(th, rel);
    }

    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
}
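
/*
 * body of the fallback timer thread: poll the internal pipe and, while
 * armed or freshly woken, deliver SIGVTALRM to the main thread roughly
 * every TIME_QUANTUM_MSEC
 */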
#if UBF_TIMER == UBF_TIMER_PTHREAD
static void *
timer_pthread_fn(void *p)
{
    rb_vm_t *vm = p;
    pthread_t main_thread_id = vm->ractor.main_thread->nt->thread_id;
    struct pollfd pfd;
    int timeout = -1;
    int ccp;

    pfd.fd = timer_pthread.low[0];
    pfd.events = POLLIN;

    while (system_working > 0) {
        (void)poll(&pfd, 1, timeout);
        ccp = consume_communication_pipe(pfd.fd);

        if (system_working > 0) {
            if (ATOMIC_CAS(timer_pthread.armed, 1, 1)) {
                pthread_kill(main_thread_id, SIGVTALRM);

                if (rb_signal_buff_size() || !ubf_threads_empty()) {
                    timeout = TIME_QUANTUM_MSEC;
                }
                else {
                    ATOMIC_SET(timer_pthread.armed, 0);
                    timeout = -1;
                }
            }
            else if (ccp) {
                pthread_kill(main_thread_id, SIGVTALRM);
                ATOMIC_SET(timer_pthread.armed, 0);
                timeout = -1;
            }
        }
    }

    return 0;
}
#endif /* UBF_TIMER_PTHREAD */

static VALUE
ubf_caller(void *ignore)
{
    rb_thread_sleep_forever();

    return Qfalse;
}

/*
 * Called if and only if one thread is running, and
 * the unblock function is NOT async-signal-safe
 * This assumes USE_THREAD_CACHE is true for performance reasons
 */
static VALUE
rb_thread_start_unblock_thread(void)
{
    return rb_thread_create(ubf_caller, 0);
}

// thread internal event hooks (only for pthread)

struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;
    rb_event_flag_t event;
    void *user_data;

    struct rb_internal_thread_event_hook *next;
};

static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
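
/*
 * register a callback for internal thread events; hooks live in a
 * singly-linked list guarded by an rwlock so that firing events (the
 * common case) only needs a read lock
 */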
rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
    hook->callback = callback;
    hook->user_data = user_data;
    hook->event = internal_event;

    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    hook->next = rb_internal_thread_event_hooks;
    ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
    return hook;
}

bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    bool success = FALSE;

    if (rb_internal_thread_event_hooks == hook) {
        ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
        success = TRUE;
    }
    else {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;

        do {
            if (h->next == hook) {
                h->next = hook->next;
                success = TRUE;
                break;
            }
        } while ((h = h->next));
    }

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }

    if (success) {
        ruby_xfree(hook);
    }
    return success;
}

static void
rb_thread_execute_hooks(rb_event_flag_t event)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }

    if (rb_internal_thread_event_hooks) {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
        do {
            if (h->event & event) {
                (*h->callback)(event, NULL, h->user_data);
            }
        } while ((h = h->next));
    }
    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}
#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
|