hijack SIGCHLD handler for internal use

Use a global SIGCHLD handler to guard all callers of rb_waitpid.
To work safely with multi-threaded programs, we introduce a
VM-wide waitpid_lock to be acquired BEFORE fork/vfork spawns the
process.  This is to be combined with the new ruby_waitpid_locked
function used by mjit.c in a non-Ruby thread.
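
For a non-GVL caller, the intended usage is roughly what the exec_process()
hunk in mjit.c below does; here is a condensed sketch (EINTR handling and
error checks omitted; path/argv are placeholders and start_process() is
mjit.c's own vfork+exec helper):

    rb_vm_t *vm = GET_VM();
    rb_nativethread_cond_t cond;
    int status;
    pid_t pid, r;

    rb_native_cond_initialize(&cond);
    rb_native_mutex_lock(&vm->waitpid_lock);   /* taken BEFORE the vfork */
    pid = start_process(path, argv);           /* spawns the child */
    r = ruby_waitpid_locked(vm, pid, &status, 0, &cond); /* sleeps on cond until SIGCHLD */
    rb_native_mutex_unlock(&vm->waitpid_lock);
    rb_native_cond_destroy(&cond);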

Ruby-level SIGCHLD handlers registered with Signal.trap(:CHLD)
continue to work as before, and there should be no regressions
in any existing use cases.

Splitting the wait queues for PID > 0 and groups (PID <= 0)
ensures we favor PID > 0 callers.
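
The split is visible in the process.c hunk below; in short:

    /* enqueueing: specific PIDs and group waits go on separate lists */
    list_add(w->pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w->wnode);

    /* reaping (ruby_waitpid_all, driven from the timer-thread on SIGCHLD):
     * group waiters only run once all PID > 0 waiters are satisfied */
    waitpid_each(&vm->waiting_pids);
    if (list_empty(&vm->waiting_pids)) {
        waitpid_each(&vm->waiting_grps);
    }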

The disabling of SIGCHLD in rb_f_system is no longer necessary,
as we use deferred signal handling and no longer make ANY
blocking waitpid syscalls in other threads which could "beat"
the waitpid call made by rb_f_system.

We prevent SIGCHLD from firing in normal Ruby Threads and only
enable it in the timer-thread, to prevent spurious wakeups
in test/-ext-/gvl/test_last_thread.rb with MJIT enabled.
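
Concretely (see the signal.c and thread_pthread.c hunks below), Ruby threads
keep SIGCHLD blocked in their signal mask while the timer-thread clears its
mask, so the C-level handler only ever fires there:

    /* Ruby threads (rb_enable_interrupt in signal.c) */
    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, RUBY_SIGCHLD); /* timer-thread handles this */
    pthread_sigmask(SIG_SETMASK, &mask, NULL);

    /* timer-thread (thread_timer in thread_pthread.c): empty mask,
     * so SIGCHLD is delivered here */
    sigemptyset(&mask);
    pthread_sigmask(SIG_SETMASK, &mask, NULL);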

I've tried to guard as much of the code for RUBY_SIGCHLD==0
using C "if" statements rather than CPP "#if" so to reduce
the likelyhood of portability problems as the compiler will
see more code.
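
In other words, prefer the first form below over the second wherever the
surrounding code allows it:

    /* preferred: always compiled and type-checked, then dead-code-eliminated
     * on platforms where RUBY_SIGCHLD == 0 */
    if (RUBY_SIGCHLD) {
        rb_vm_t *vm = GET_VM();
        /* ... SIGCHLD-dependent path ... */
    }

    /* avoided where possible: the compiler never sees the body on platforms
     * without SIGCHLD/SIGCLD */
    #if RUBY_SIGCHLD
        /* ... SIGCHLD-dependent path ... */
    #endif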

We also work to suppress false-positives from
Process.wait(-1, Process::WNOHANG) to quiet warnings from
spec/ruby/core/process/wait2_spec.rb with MJIT enabled.
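
The suppression itself is in waitpid_wait() in the process.c hunk below:
when a Ruby caller does a WNOHANG wait on -1/0 while a specific-PID waiter
(i.e. an MJIT worker) is queued, we skip the syscall and report ECHILD
rather than exposing the MJIT child:

    else if (w->options & WNOHANG) {
        w->cond = 0;
        /* MJIT must be waiting, but don't tell Ruby callers about it */
        if (w->pid < 0 && !list_empty(&vm->waiting_pids)) {
            w->ret = -1;
            w->errnum = ECHILD;
        }
    }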

Lastly, we must implement rb_grantpt for ext/pty.  We need an
MJIT-compatible way of supporting grantpt(3), which may spawn
the `pt_chown' binary and call waitpid(2) on it.
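
On the ext/pty side the call site reduces to the following (sketch; compare
the pty.c hunks below, which drop the old sigaction(SIGCHLD, ...) juggling
since rb_grantpt() serializes against waitpid users via vm->waitpid_lock):

    if ((masterfd = posix_openpt(O_RDWR|O_NOCTTY)) == -1) goto error;
    rb_fd_fix_cloexec(masterfd);
    if (rb_grantpt(masterfd) == -1) goto error; /* may spawn pt_chown and waitpid() it */
    if (unlockpt(masterfd) == -1) goto error;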

[ruby-core:87605] [Ruby trunk Bug#14867]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63758 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
normal 2018-06-27 03:14:30 +00:00
Parent e3e22c551d
Commit 054a412d54
9 changed files with 447 additions and 99 deletions

View file

@ -1782,6 +1782,7 @@ AC_CHECK_FUNCS(getsid)
AC_CHECK_FUNCS(gettimeofday) # for making ac_cv_func_gettimeofday
AC_CHECK_FUNCS(getuidx)
AC_CHECK_FUNCS(gmtime_r)
AC_CHECK_FUNCS(grantpt)
AC_CHECK_FUNCS(initgroups)
AC_CHECK_FUNCS(ioctl)
AC_CHECK_FUNCS(isfinite)

View file

@ -246,19 +246,13 @@ get_device_once(int *master, int *slave, char SlaveName[DEVICELEN], int nomesg,
/* Unix98 PTY */
int masterfd = -1, slavefd = -1;
char *slavedevice;
struct sigaction dfl, old;
dfl.sa_handler = SIG_DFL;
dfl.sa_flags = 0;
sigemptyset(&dfl.sa_mask);
#if defined(__sun) || (defined(__FreeBSD__) && __FreeBSD_version < 902000)
/* workaround for Solaris 10: grantpt() doesn't work if FD_CLOEXEC is set. [ruby-dev:44688] */
/* FreeBSD 9.2 or later supports O_CLOEXEC
* http://www.freebsd.org/cgi/query-pr.cgi?pr=162374 */
if ((masterfd = posix_openpt(O_RDWR|O_NOCTTY)) == -1) goto error;
if (sigaction(SIGCHLD, &dfl, &old) == -1) goto error;
if (grantpt(masterfd) == -1) goto grantpt_error;
if (rb_grantpt(masterfd) == -1) goto error;
rb_fd_fix_cloexec(masterfd);
#else
{
@ -272,10 +266,8 @@ get_device_once(int *master, int *slave, char SlaveName[DEVICELEN], int nomesg,
if ((masterfd = posix_openpt(flags)) == -1) goto error;
}
rb_fd_fix_cloexec(masterfd);
if (sigaction(SIGCHLD, &dfl, &old) == -1) goto error;
if (grantpt(masterfd) == -1) goto grantpt_error;
if (rb_grantpt(masterfd) == -1) goto error;
#endif
if (sigaction(SIGCHLD, &old, NULL) == -1) goto error;
if (unlockpt(masterfd) == -1) goto error;
if ((slavedevice = ptsname(masterfd)) == NULL) goto error;
if (no_mesg(slavedevice, nomesg) == -1) goto error;
@ -293,8 +285,6 @@ get_device_once(int *master, int *slave, char SlaveName[DEVICELEN], int nomesg,
strlcpy(SlaveName, slavedevice, DEVICELEN);
return 0;
grantpt_error:
sigaction(SIGCHLD, &old, NULL);
error:
if (slavefd != -1) close(slavefd);
if (masterfd != -1) close(masterfd);
@ -346,21 +336,17 @@ get_device_once(int *master, int *slave, char SlaveName[DEVICELEN], int nomesg,
extern char *ptsname(int);
extern int unlockpt(int);
extern int grantpt(int);
#if defined(__sun)
/* workaround for Solaris 10: grantpt() doesn't work if FD_CLOEXEC is set. [ruby-dev:44688] */
if((masterfd = open("/dev/ptmx", O_RDWR, 0)) == -1) goto error;
s = signal(SIGCHLD, SIG_DFL);
if(grantpt(masterfd) == -1) goto error;
if(rb_grantpt(masterfd) == -1) goto error;
rb_fd_fix_cloexec(masterfd);
#else
if((masterfd = rb_cloexec_open("/dev/ptmx", O_RDWR, 0)) == -1) goto error;
rb_update_max_fd(masterfd);
s = signal(SIGCHLD, SIG_DFL);
if(grantpt(masterfd) == -1) goto error;
if(rb_grantpt(masterfd) == -1) goto error;
#endif
signal(SIGCHLD, s);
if(unlockpt(masterfd) == -1) goto error;
if((slavedevice = ptsname(masterfd)) == NULL) goto error;
if (no_mesg(slavedevice, nomesg) == -1) goto error;

View file

@ -2042,6 +2042,9 @@ VALUE rb_gcd_normal(VALUE self, VALUE other);
VALUE rb_gcd_gmp(VALUE x, VALUE y);
#endif
/* signal.c (export) */
int rb_grantpt(int fd);
/* string.c (export) */
#ifdef RUBY_ENCODING_H
/* internal use */

48
mjit.c
View file

@ -80,6 +80,7 @@
#include "constant.h"
#include "id_table.h"
#include "ruby_assert.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/version.h"
@ -118,6 +119,10 @@ extern void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lo
extern int rb_thread_create_mjit_thread(void (*child_hook)(void), void (*worker_func)(void));
/* process.c */
pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options,
rb_nativethread_cond_t *cond);
#define RB_CONDATTR_CLOCK_MONOTONIC 1
#ifdef _WIN32
@ -401,22 +406,40 @@ start_process(const char *path, char *const *argv)
static int
exec_process(const char *path, char *const argv[])
{
int stat, exit_code;
int stat, exit_code = -2;
pid_t pid;
rb_vm_t *vm = RUBY_SIGCHLD ? GET_VM() : 0;
rb_nativethread_cond_t cond;
if (vm) {
rb_native_cond_initialize(&cond);
rb_native_mutex_lock(&vm->waitpid_lock);
}
pid = start_process(path, argv);
if (pid <= 0)
return -2;
for (;;) {
waitpid(pid, &stat, 0);
if (WIFEXITED(stat)) {
exit_code = WEXITSTATUS(stat);
break;
} else if (WIFSIGNALED(stat)) {
exit_code = -1;
for (;pid > 0;) {
pid_t r = vm ? ruby_waitpid_locked(vm, pid, &stat, 0, &cond)
: waitpid(pid, &stat, 0);
if (r == -1) {
if (errno == EINTR) continue;
fprintf(stderr, "[%d] waitpid(%d): %s\n",
getpid(), pid, strerror(errno));
break;
}
else if (r == pid) {
if (WIFEXITED(stat)) {
exit_code = WEXITSTATUS(stat);
break;
} else if (WIFSIGNALED(stat)) {
exit_code = -1;
break;
}
}
}
if (vm) {
rb_native_mutex_unlock(&vm->waitpid_lock);
rb_native_cond_destroy(&cond);
}
return exit_code;
}
@ -1491,12 +1514,15 @@ mjit_init(struct mjit_options *opts)
static void
stop_worker(void)
{
rb_execution_context_t *ec = GET_EC();
stop_worker_p = TRUE;
while (!worker_stopped) {
verbose(3, "Sending cancel signal to worker");
CRITICAL_SECTION_START(3, "in stop_worker");
rb_native_cond_broadcast(&mjit_worker_wakeup);
CRITICAL_SECTION_FINISH(3, "in stop_worker");
RUBY_VM_CHECK_INTS(ec);
}
}

274
process.c
View file

@ -885,12 +885,6 @@ pst_wcoredump(VALUE st)
#endif
}
struct waitpid_arg {
rb_pid_t pid;
int flags;
int *st;
};
static rb_pid_t
do_waitpid(rb_pid_t pid, int *st, int flags)
{
@ -903,45 +897,248 @@ do_waitpid(rb_pid_t pid, int *st, int flags)
#endif
}
static void *
rb_waitpid_blocking(void *data)
struct waitpid_state {
struct list_node wnode;
rb_execution_context_t *ec;
rb_nativethread_cond_t *cond;
rb_pid_t ret;
rb_pid_t pid;
int status;
int options;
int errnum;
};
void rb_native_mutex_lock(rb_nativethread_lock_t *);
void rb_native_mutex_unlock(rb_nativethread_lock_t *);
void rb_native_cond_signal(rb_nativethread_cond_t *);
void rb_native_cond_wait(rb_nativethread_cond_t *, rb_nativethread_lock_t *);
rb_nativethread_cond_t *rb_sleep_cond_get(const rb_execution_context_t *);
void rb_sleep_cond_put(rb_nativethread_cond_t *);
static void
waitpid_notify(struct waitpid_state *w, pid_t ret)
{
struct waitpid_arg *arg = data;
rb_pid_t result = do_waitpid(arg->pid, arg->st, arg->flags);
return (void *)(VALUE)result;
w->ret = ret;
list_del_init(&w->wnode);
rb_native_cond_signal(w->cond);
}
static rb_pid_t
do_waitpid_nonblocking(rb_pid_t pid, int *st, int flags)
/* called by both timer thread and main thread */
static void
waitpid_each(struct list_head *head)
{
void *result;
struct waitpid_arg arg;
arg.pid = pid;
arg.st = st;
arg.flags = flags;
result = rb_thread_call_without_gvl(rb_waitpid_blocking, &arg,
RUBY_UBF_PROCESS, 0);
return (rb_pid_t)(VALUE)result;
struct waitpid_state *w = 0, *next;
list_for_each_safe(head, w, next, wnode) {
pid_t ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
if (!ret) continue;
if (ret == -1) w->errnum = errno;
if (w->ec) { /* rb_waitpid */
rb_thread_t *th = rb_ec_thread_ptr(w->ec);
rb_native_mutex_lock(&th->interrupt_lock);
waitpid_notify(w, ret);
rb_native_mutex_unlock(&th->interrupt_lock);
}
else { /* ruby_waitpid_locked */
waitpid_notify(w, ret);
}
}
}
void
ruby_waitpid_all(rb_vm_t *vm)
{
rb_native_mutex_lock(&vm->waitpid_lock);
waitpid_each(&vm->waiting_pids);
if (list_empty(&vm->waiting_pids)) {
waitpid_each(&vm->waiting_grps);
}
rb_native_mutex_unlock(&vm->waitpid_lock);
}
static void
waitpid_state_init(struct waitpid_state *w, pid_t pid, int options)
{
w->ret = 0;
w->pid = pid;
w->options = options;
}
/*
* must be called with vm->waitpid_lock held, this is not interruptible
*/
pid_t
ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
rb_nativethread_cond_t *cond)
{
struct waitpid_state w;
assert(!ruby_thread_has_gvl_p() && "must not have GVL");
waitpid_state_init(&w, pid, options);
if (w.pid > 0 || list_empty(&vm->waiting_pids))
w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
if (w.ret) {
if (w.ret == -1) w.errnum = errno;
}
else {
w.cond = cond;
w.ec = 0;
list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
do {
rb_native_cond_wait(w.cond, &vm->waitpid_lock);
} while (!w.ret);
list_del(&w.wnode);
}
if (status) {
*status = w.status;
}
if (w.ret == -1) errno = w.errnum;
return w.ret;
}
static void
waitpid_wake(void *x)
{
struct waitpid_state *w = x;
/* th->interrupt_lock is already held by rb_threadptr_interrupt_common */
rb_native_cond_signal(w->cond);
}
static void *
waitpid_nogvl(void *x)
{
struct waitpid_state *w = x;
rb_thread_t *th = rb_ec_thread_ptr(w->ec);
rb_native_mutex_lock(&th->interrupt_lock);
if (!w->ret) { /* we must check this before waiting */
rb_native_cond_wait(w->cond, &th->interrupt_lock);
}
rb_native_mutex_unlock(&th->interrupt_lock);
return 0;
}
static VALUE
waitpid_sleep(VALUE x)
{
struct waitpid_state *w = (struct waitpid_state *)x;
while (!w->ret) {
rb_thread_call_without_gvl(waitpid_nogvl, w, waitpid_wake, w);
}
return Qfalse;
}
static VALUE
waitpid_cleanup(VALUE x)
{
struct waitpid_state *w = (struct waitpid_state *)x;
if (w->ret == 0) {
rb_vm_t *vm = rb_ec_vm_ptr(w->ec);
rb_native_mutex_lock(&vm->waitpid_lock);
list_del(&w->wnode);
rb_native_mutex_unlock(&vm->waitpid_lock);
}
rb_sleep_cond_put(w->cond);
return Qfalse;
}
static void
waitpid_wait(struct waitpid_state *w)
{
rb_vm_t *vm = rb_ec_vm_ptr(w->ec);
/*
* Lock here to prevent do_waitpid from stealing work from the
* ruby_waitpid_locked done by mjit workers since mjit works
* outside of GVL
*/
rb_native_mutex_lock(&vm->waitpid_lock);
if (w->pid > 0 || list_empty(&vm->waiting_pids))
w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
if (w->ret) {
w->cond = 0;
if (w->ret == -1) w->errnum = errno;
}
else if (w->options & WNOHANG) {
w->cond = 0;
/* MJIT must be waiting, but don't tell Ruby callers about it */
if (w->pid < 0 && !list_empty(&vm->waiting_pids)) {
w->ret = -1;
w->errnum = ECHILD;
}
}
else {
w->cond = rb_sleep_cond_get(w->ec);
/* order matters, favor specified PIDs rather than -1 or 0 */
list_add(w->pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w->wnode);
}
rb_native_mutex_unlock(&vm->waitpid_lock);
if (w->cond) {
rb_ensure(waitpid_sleep, (VALUE)w, waitpid_cleanup, (VALUE)w);
}
}
static void *
waitpid_blocking_no_SIGCHLD(void *x)
{
struct waitpid_state *w = x;
w->ret = do_waitpid(w->pid, &w->status, w->options);
return 0;
}
static void
waitpid_no_SIGCHLD(struct waitpid_state *w)
{
if (w->options & WNOHANG) {
w->ret = do_waitpid(w->pid, &w->status, w->options);
}
else {
do {
rb_thread_call_without_gvl(waitpid_blocking_no_SIGCHLD, &w,
RUBY_UBF_PROCESS, 0);
} while (w->ret < 0 && errno == EINTR && (RUBY_VM_CHECK_INTS(w->ec),1));
}
}
rb_pid_t
rb_waitpid(rb_pid_t pid, int *st, int flags)
{
rb_pid_t result;
struct waitpid_state w;
if (flags & WNOHANG) {
result = do_waitpid(pid, st, flags);
waitpid_state_init(&w, pid, flags);
w.ec = GET_EC();
if (RUBY_SIGCHLD) {
waitpid_wait(&w);
}
else {
while ((result = do_waitpid_nonblocking(pid, st, flags)) < 0 &&
(errno == EINTR)) {
RUBY_VM_CHECK_INTS(GET_EC());
}
waitpid_no_SIGCHLD(&w);
}
if (result > 0) {
rb_last_status_set(*st, result);
if (st) *st = w.status;
if (w.ret > 0) {
rb_last_status_set(w.status, w.ret);
}
return result;
if (w.ret == -1) errno = w.errnum;
return w.ret;
}
@ -3595,6 +3792,8 @@ disable_child_handler_fork_child(struct child_handler_disabler_state *old, char
}
}
/* non-Ruby child process, ensure cmake can see SIGCHLD */
sigemptyset(&old->sigmask);
ret = sigprocmask(SIG_SETMASK, &old->sigmask, NULL); /* async-signal-safe */
if (ret != 0) {
ERRMSG("sigprocmask");
@ -4086,16 +4285,6 @@ rb_f_system(int argc, VALUE *argv)
VALUE execarg_obj;
struct rb_execarg *eargp;
#if defined(SIGCLD) && !defined(SIGCHLD)
# define SIGCHLD SIGCLD
#endif
#ifdef SIGCHLD
RETSIGTYPE (*chfunc)(int);
rb_last_status_clear();
chfunc = signal(SIGCHLD, SIG_DFL);
#endif
execarg_obj = rb_execarg_new(argc, argv, TRUE, TRUE);
pid = rb_execarg_spawn(execarg_obj, NULL, 0);
#if defined(HAVE_WORKING_FORK) || defined(HAVE_SPAWNV)
@ -4105,9 +4294,6 @@ rb_f_system(int argc, VALUE *argv)
if (ret == (rb_pid_t)-1)
rb_sys_fail("Another thread waited the process started by system().");
}
#endif
#ifdef SIGCHLD
signal(SIGCHLD, chfunc);
#endif
TypedData_Get_Struct(execarg_obj, struct rb_execarg, &exec_arg_data_type, eargp);
if (pid < 0) {

126
signal.c
View file

@ -62,10 +62,6 @@ ruby_atomic_compare_and_swap(rb_atomic_t *ptr, rb_atomic_t cmp,
}
#endif
#ifndef NSIG
# define NSIG (_SIGMAX + 1) /* For QNX */
#endif
static const struct signals {
const char *signm;
int signo;
@ -129,15 +125,9 @@ static const struct signals {
#ifdef SIGCONT
{"CONT", SIGCONT},
#endif
#ifdef SIGCHLD
{"CHLD", SIGCHLD},
#endif
#ifdef SIGCLD
{"CLD", SIGCLD},
#else
# ifdef SIGCHLD
{"CLD", SIGCHLD},
# endif
#if RUBY_SIGCHLD
{"CHLD", RUBY_SIGCHLD },
{"CLD", RUBY_SIGCHLD },
#endif
#ifdef SIGTTIN
{"TTIN", SIGTTIN},
@ -702,12 +692,29 @@ signal_enque(int sig)
ATOMIC_INC(signal_buff.size);
}
static sig_atomic_t sigchld_hit;
/* Prevent compiler from reordering access */
#define ACCESS_ONCE(type,x) (*((volatile type *)&(x)))
static RETSIGTYPE
sighandler(int sig)
{
int old_errnum = errno;
signal_enque(sig);
/* the VM always needs to handle SIGCHLD for rb_waitpid */
if (sig == RUBY_SIGCHLD) {
rb_vm_t *vm = GET_VM();
sigchld_hit = 1;
/* avoid spurious wakeup in main thread iff nobody uses trap(:CHLD) */
if (vm && ACCESS_ONCE(VALUE, vm->trap_list.cmd[sig])) {
signal_enque(sig);
}
}
else {
signal_enque(sig);
}
rb_thread_wakeup_timer_thread();
#if !defined(BSD_SIGNAL) && !defined(POSIX_SIGNAL)
ruby_signal(sig, sighandler);
@ -742,6 +749,7 @@ rb_enable_interrupt(void)
#ifdef HAVE_PTHREAD_SIGMASK
sigset_t mask;
sigemptyset(&mask);
sigaddset(&mask, RUBY_SIGCHLD); /* timer-thread handles this */
pthread_sigmask(SIG_SETMASK, &mask, NULL);
#endif
}
@ -1052,6 +1060,18 @@ rb_trap_exit(void)
}
}
void ruby_waitpid_all(rb_vm_t *); /* process.c */
/* only runs in the timer-thread */
void
ruby_sigchld_handler(rb_vm_t *vm)
{
if (sigchld_hit) {
sigchld_hit = 0;
ruby_waitpid_all(vm);
}
}
void
rb_signal_exec(rb_thread_t *th, int sig)
{
@ -1117,6 +1137,9 @@ default_handler(int sig)
#endif
#ifdef SIGUSR2
case SIGUSR2:
#endif
#if RUBY_SIGCHLD
case RUBY_SIGCHLD:
#endif
func = sighandler;
break;
@ -1155,6 +1178,9 @@ trap_handler(VALUE *cmd, int sig)
VALUE command;
if (NIL_P(*cmd)) {
if (sig == RUBY_SIGCHLD) {
goto sig_dfl;
}
func = SIG_IGN;
}
else {
@ -1175,6 +1201,9 @@ trap_handler(VALUE *cmd, int sig)
break;
case 14:
if (memcmp(cptr, "SYSTEM_DEFAULT", 14) == 0) {
if (sig == RUBY_SIGCHLD) {
goto sig_dfl;
}
func = SIG_DFL;
*cmd = 0;
}
@ -1182,6 +1211,9 @@ trap_handler(VALUE *cmd, int sig)
case 7:
if (memcmp(cptr, "SIG_IGN", 7) == 0) {
sig_ign:
if (sig == RUBY_SIGCHLD) {
goto sig_dfl;
}
func = SIG_IGN;
*cmd = Qtrue;
}
@ -1268,7 +1300,7 @@ trap(int sig, sighandler_t func, VALUE command)
break;
}
vm->trap_list.cmd[sig] = command;
ACCESS_ONCE(VALUE, vm->trap_list.cmd[sig]) = command;
vm->trap_list.safe[sig] = rb_safe_level();
return oldcmd;
@ -1413,20 +1445,18 @@ install_sighandler(int signum, sighandler_t handler)
# define install_sighandler(signum, handler) \
INSTALL_SIGHANDLER(install_sighandler(signum, handler), #signum, signum)
#if defined(SIGCLD) || defined(SIGCHLD)
#if RUBY_SIGCHLD
static int
init_sigchld(int sig)
{
sighandler_t oldfunc;
sighandler_t func = sighandler;
oldfunc = ruby_signal(sig, SIG_DFL);
if (oldfunc == SIG_ERR) return -1;
if (oldfunc != SIG_DFL && oldfunc != SIG_IGN) {
ruby_signal(sig, oldfunc);
}
else {
GET_VM()->trap_list.cmd[sig] = 0;
}
ruby_signal(sig, func);
ACCESS_ONCE(VALUE, GET_VM()->trap_list.cmd[sig]) = 0;
return 0;
}
@ -1542,11 +1572,55 @@ Init_signal(void)
install_sighandler(SIGSYS, sig_do_nothing);
#endif
#if defined(SIGCLD)
init_sigchld(SIGCLD);
#elif defined(SIGCHLD)
init_sigchld(SIGCHLD);
#if RUBY_SIGCHLD
init_sigchld(RUBY_SIGCHLD);
#endif
rb_enable_interrupt();
}
#if defined(HAVE_GRANTPT)
extern int grantpt(int);
#else
static int
fake_grantfd(int masterfd)
{
errno = ENOSYS;
return -1;
}
#define grantpt(fd) fake_grantfd(fd)
#endif
int
rb_grantpt(int masterfd)
{
if (RUBY_SIGCHLD) {
rb_vm_t *vm = GET_VM();
int ret, e;
/*
* Prevent waitpid calls from Ruby by taking waitpid_lock.
* Pedantically, grantpt(3) is undefined if a non-default
* SIGCHLD handler is defined, but preventing conflicting
* waitpid calls ought to be sufficient.
*
* We could install the default sighandler temporarily, but that
* could cause SIGCHLD to be missed by other threads. Blocking
* SIGCHLD won't work here, either, unless we stop and restart
* timer-thread (as only timer-thread sees SIGCHLD), but that
* seems like overkill.
*/
rb_nativethread_lock_lock(&vm->waitpid_lock);
{
ret = grantpt(masterfd); /* may spawn `pt_chown' and wait on it */
if (ret < 0) e = errno;
}
rb_nativethread_lock_unlock(&vm->waitpid_lock);
if (ret < 0) errno = e;
return ret;
}
else {
return grantpt(masterfd);
}
}

View file

@ -413,6 +413,10 @@ rb_vm_gvl_destroy(rb_vm_t *vm)
gvl_release(vm);
gvl_destroy(vm);
rb_native_mutex_destroy(&vm->thread_destruct_lock);
if (0) {
/* may be held by running threads */
rb_native_mutex_destroy(&vm->waitpid_lock);
}
}
void
@ -4131,6 +4135,9 @@ rb_gc_set_stack_end(VALUE **stack_end_p)
#endif
/* signal.c */
void ruby_sigchld_handler(rb_vm_t *);
/*
*
*/
@ -4163,6 +4170,7 @@ timer_thread_function(void *arg)
rb_native_mutex_unlock(&vm->thread_destruct_lock);
/* check signal */
ruby_sigchld_handler(vm);
rb_threadptr_check_signal(vm->main_thread);
#if 0
@ -4247,6 +4255,9 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
}
rb_vm_living_threads_init(vm);
rb_vm_living_threads_insert(vm, th);
/* may be held by MJIT threads in parent */
rb_native_mutex_initialize(&vm->waitpid_lock);
vm->fork_gen++;
vm->sleeper = 0;
@ -4999,6 +5010,7 @@ Init_Thread(void)
gvl_init(th->vm);
gvl_acquire(th->vm, th);
rb_native_mutex_initialize(&th->vm->thread_destruct_lock);
rb_native_mutex_initialize(&th->vm->waitpid_lock);
rb_native_mutex_initialize(&th->interrupt_lock);
th->pending_interrupt_queue = rb_ary_tmp_new(0);
@ -5302,3 +5314,25 @@ rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data)
return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
}
#ifndef USE_NATIVE_SLEEP_COND
# define USE_NATIVE_SLEEP_COND (0)
#endif
#if !USE_NATIVE_SLEEP_COND
rb_nativethread_cond_t *
rb_sleep_cond_get(const rb_execution_context_t *ec)
{
rb_nativethread_cond_t *cond = ALLOC(rb_nativethread_cond_t);
rb_native_cond_initialize(cond);
return cond;
}
void
rb_sleep_cond_put(rb_nativethread_cond_t *cond)
{
rb_native_cond_destroy(cond);
xfree(cond);
}
#endif /* !USE_NATIVE_SLEEP_COND */

View file

@ -1479,6 +1479,13 @@ static void *
thread_timer(void *p)
{
rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;
#ifdef HAVE_PTHREAD_SIGMASK /* mainly to enable SIGCHLD */
{
sigset_t mask;
sigemptyset(&mask);
pthread_sigmask(SIG_SETMASK, &mask, NULL);
}
#endif
if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");
@ -1764,4 +1771,22 @@ rb_thread_create_mjit_thread(void (*child_hook)(void), void (*worker_func)(void)
return ret;
}
#define USE_NATIVE_SLEEP_COND (1)
#if USE_NATIVE_SLEEP_COND
rb_nativethread_cond_t *
rb_sleep_cond_get(const rb_execution_context_t *ec)
{
rb_thread_t *th = rb_ec_thread_ptr(ec);
return &th->native_thread_data.sleep_cond;
}
void
rb_sleep_cond_put(rb_nativethread_cond_t *cond)
{
/* no-op */
}
#endif /* USE_NATIVE_SLEEP_COND */
#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */

View file

@ -92,6 +92,14 @@
#define RUBY_NSIG NSIG
#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#else
# define RUBY_SIGCHLD (0)
#endif
#ifdef HAVE_STDARG_PROTOTYPES
#include <stdarg.h>
#define va_init_list(a,b) va_start((a),(b))
@ -553,6 +561,9 @@ typedef struct rb_vm_struct {
#endif
rb_serial_t fork_gen;
rb_nativethread_lock_t waitpid_lock;
struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
struct list_head waiting_fds; /* <=> struct waiting_fd */
struct list_head living_threads;
VALUE thgroup_default;
@ -1561,6 +1572,8 @@ static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
list_head_init(&vm->waiting_fds);
list_head_init(&vm->waiting_pids);
list_head_init(&vm->waiting_grps);
list_head_init(&vm->living_threads);
vm->living_thread_num = 0;
}