Replace copy coroutine with pthread implementation.

Samuel Williams 2021-06-26 10:17:26 +12:00
Parent 9c9531950c
Commit 42130a64f0
No key matching this signature was found
GPG key ID: A0765423A44728FB
30 changed files with 492 additions and 449 deletions

.github/workflows/compilers.yml (vendored) | 2 changes | View file

@ -93,7 +93,7 @@ jobs:
- { key: append_configure, name: jemalloc, value: '--with-jemalloc' }
- { key: append_configure, name: valgrind, value: '--with-valgrind' }
- { key: append_configure, name: 'coroutine=ucontext', value: '--with-coroutine=ucontext' }
- { key: append_configure, name: 'coroutine=copy', value: '--with-coroutine=copy' }
- { key: append_configure, name: 'coroutine=pthread', value: '--with-coroutine=pthread' }
- { key: append_configure, name: disable-jit-support, value: '--disable-jit-support' }
- { key: append_configure, name: disable-dln, value: '--disable-dln' }
- { key: append_configure, name: disable-rubygems, value: '--disable-rubygems' }

View file

@ -128,6 +128,8 @@ Outstanding ones only.
* Make `Monitor` fiber-safe. [[Bug #17827]]
* Replace copy coroutine with pthread implementation. [[Feature #18015]]
## Stdlib updates
Outstanding ones only.
@ -193,3 +195,4 @@ Excluding feature bug fixes.
[Feature #17470]: https://bugs.ruby-lang.org/issues/17470
[Feature #17853]: https://bugs.ruby-lang.org/issues/17853
[Bug #17827]: https://bugs.ruby-lang.org/issues/17827
[Feature #18015]: https://bugs.ruby-lang.org/issues/18015

View file

@ -19,12 +19,12 @@ assert_equal %q{ok}, %q{
}
assert_equal %q{ok}, %q{
10_000.times.collect{Fiber.new{}}
100.times.collect{Fiber.new{}}
:ok
}
assert_equal %q{ok}, %q{
fibers = 100.times.collect{Fiber.new{Fiber.yield}}
fibers = 1000.times.collect{Fiber.new{Fiber.yield}}
fibers.each(&:resume)
fibers.each(&:resume)
:ok

View file

@ -2533,10 +2533,10 @@ AS_CASE([$coroutine_type], [yes|''], [
coroutine_type=x86
],
[*-openbsd*], [
coroutine_type=copy
coroutine_type=pthread
],
[*-haiku*], [
coroutine_type=copy
coroutine_type=pthread
],
[*-emscripten*], [
coroutine_type=emscripten
@ -2544,7 +2544,7 @@ AS_CASE([$coroutine_type], [yes|''], [
[
AC_CHECK_FUNCS([getcontext swapcontext makecontext],
[coroutine_type=ucontext],
[coroutine_type=copy; break]
[coroutine_type=pthread; break]
)
]
)
@ -2554,7 +2554,7 @@ COROUTINE_H=coroutine/$coroutine_type/Context.h
AS_IF([test ! -f "$srcdir/$COROUTINE_H"],
[AC_MSG_ERROR('$coroutine_type' is not supported as coroutine)])
AS_CASE([$coroutine_type],
[copy|ucontext], [
[ucontext|pthread], [
COROUTINE_SRC=coroutine/$coroutine_type/Context.c
],
[

cont.c | 141 changes | View file

@ -711,10 +711,51 @@ fiber_pool_stack_release(struct fiber_pool_stack * stack)
#endif
}
void rb_fiber_start(rb_fiber_t*);
static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
rb_execution_context_t *ec = &fiber->cont.saved_ec;
rb_ractor_set_current_ec(th->ractor, th->ec = ec);
// ruby_current_execution_context_ptr = th->ec = ec;
/*
* timer-thread may set trap interrupt on previous th->ec at any time;
* ensure we do not delay (or lose) the trap interrupt handling.
*/
if (th->vm->ractor.main_thread == th &&
rb_signal_buff_size() > 0) {
RUBY_VM_SET_TRAP_INTERRUPT(ec);
}
VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
}
static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
{
ec_switch(th, fiber);
VM_ASSERT(th->ec->fiber_ptr == fiber);
}
static COROUTINE
fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
{
rb_fiber_start();
rb_fiber_t *fiber = to->argument;
rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;
#ifdef COROUTINE_PTHREAD_CONTEXT
ruby_thread_set_native(thread);
#endif
fiber_restore_thread(thread, fiber);
rb_fiber_start(fiber);
#ifndef COROUTINE_PTHREAD_CONTEXT
VM_UNREACHABLE(fiber_entry);
#endif
}
// Initialize a fiber's coroutine's machine stack and vm stack.
@ -731,22 +772,13 @@ fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
*vm_stack_size = fiber_pool->vm_stack_size;
#ifdef COROUTINE_PRIVATE_STACK
coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, sec->machine.stack_start);
// The stack for this execution context is still the main machine stack, so don't adjust it.
// If this is not managed correctly, you will fail in `rb_ec_stack_check`.
// We limit the machine stack usage to the fiber stack size.
if (sec->machine.stack_maxsize > fiber->stack.available) {
sec->machine.stack_maxsize = fiber->stack.available;
}
#else
coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
// The stack for this execution context is the one we allocated:
sec->machine.stack_start = fiber->stack.current;
sec->machine.stack_maxsize = fiber->stack.available;
#endif
fiber->context.argument = (void*)fiber;
return vm_stack;
}
@ -815,25 +847,6 @@ fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
fiber->status = s;
}
static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
rb_execution_context_t *ec = &fiber->cont.saved_ec;
rb_ractor_set_current_ec(th->ractor, th->ec = ec);
// ruby_current_execution_context_ptr = th->ec = ec;
/*
* timer-thread may set trap interrupt on previous th->ec at any time;
* ensure we do not delay (or lose) the trap interrupt handling.
*/
if (th->vm->ractor.main_thread == th &&
rb_signal_buff_size() > 0) {
RUBY_VM_SET_TRAP_INTERRUPT(ec);
}
VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
}
static rb_context_t *
cont_ptr(VALUE obj)
{
@ -1041,7 +1054,7 @@ fiber_free(void *ptr)
rb_fiber_t *fiber = ptr;
RUBY_FREE_ENTER("fiber");
//if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", fiber, fiber->stack.base);
if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", fiber, fiber->stack.base);
if (fiber->cont.saved_ec.local_storage) {
rb_id_table_free(fiber->cont.saved_ec.local_storage);
@ -1277,13 +1290,6 @@ cont_capture(volatile int *volatile stat)
}
COMPILER_WARNING_POP
static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
{
ec_switch(th, fiber);
VM_ASSERT(th->ec->fiber_ptr == fiber);
}
static inline void
cont_restore_thread(rb_context_t *cont)
{
@ -1326,7 +1332,6 @@ cont_restore_thread(rb_context_t *cont)
th->ec->cfp = sec->cfp;
th->ec->raised_flag = sec->raised_flag;
th->ec->tag = sec->tag;
th->ec->protect_tag = sec->protect_tag;
th->ec->root_lep = sec->root_lep;
th->ec->root_svar = sec->root_svar;
th->ec->ensure_list = sec->ensure_list;
@ -1367,13 +1372,17 @@ fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
/* old_fiber->machine.stack_end should be NULL */
old_fiber->cont.saved_ec.machine.stack_end = NULL;
/* restore thread context */
fiber_restore_thread(th, new_fiber);
// if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
/* swap machine context */
coroutine_transfer(&old_fiber->context, &new_fiber->context);
struct coroutine_context * from = coroutine_transfer(&old_fiber->context, &new_fiber->context);
if (from == NULL) {
rb_syserr_fail(errno, "coroutine_transfer");
}
/* restore thread context */
fiber_restore_thread(th, old_fiber);
// It's possible to get here after new_fiber has already been freed.
// if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
@ -1670,9 +1679,6 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
if (cont_thread_value(cont) != th->self) {
rb_raise(rb_eRuntimeError, "continuation called across threads");
}
if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
}
if (cont->saved_ec.fiber_ptr) {
if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
rb_raise(rb_eRuntimeError, "continuation called across fiber");
@ -2031,13 +2037,13 @@ rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)
return rb_fiber_scheduler_set(scheduler);
}
NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err));
static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err);
void
rb_fiber_start(void)
rb_fiber_start(rb_fiber_t *fiber)
{
rb_thread_t * volatile th = GET_THREAD();
rb_fiber_t *fiber = th->ec->fiber_ptr;
rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;
rb_proc_t *proc;
enum ruby_tag_type state;
int need_interrupt = TRUE;
@ -2084,7 +2090,6 @@ rb_fiber_start(void)
}
rb_fiber_terminate(fiber, need_interrupt, err);
VM_UNREACHABLE(rb_fiber_start);
}
static rb_fiber_t *
@ -2101,12 +2106,7 @@ root_fiber_alloc(rb_thread_t *th)
DATA_PTR(fiber_value) = fiber;
fiber->cont.self = fiber_value;
#ifdef COROUTINE_PRIVATE_STACK
fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
coroutine_initialize_main(&fiber->context, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, th->ec->machine.stack_start);
#else
coroutine_initialize_main(&fiber->context);
#endif
return fiber;
}
@ -2255,10 +2255,8 @@ fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, VALUE
if (cont_thread_value(cont) != th->self) {
rb_raise(rb_eFiberError, "fiber called across threads");
}
else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
}
else if (FIBER_TERMINATED_P(fiber)) {
if (FIBER_TERMINATED_P(fiber)) {
value = rb_exc_new2(rb_eFiberError, "dead fiber called");
if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
@ -2307,9 +2305,12 @@ fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, VALUE
fiber_store(fiber, th);
// We cannot free the stack until the pthread is joined:
#ifndef COROUTINE_PTHREAD_CONTEXT
if (RTEST(resuming_fiber) && FIBER_TERMINATED_P(fiber)) {
fiber_stack_release(fiber);
}
#endif
if (fiber_current()->blocking) {
th->blocking += 1;
@ -2388,26 +2389,24 @@ rb_fiber_close(rb_fiber_t *fiber)
}
static void
rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err)
rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE error)
{
VALUE value = fiber->cont.value;
rb_fiber_t *next_fiber;
VM_ASSERT(FIBER_RESUMED_P(fiber));
rb_fiber_close(fiber);
coroutine_destroy(&fiber->context);
fiber->cont.machine.stack = NULL;
fiber->cont.machine.stack_size = 0;
next_fiber = return_fiber(true);
rb_fiber_t *next_fiber = return_fiber(true);
if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
if (RTEST(err))
fiber_switch(next_fiber, -1, &err, RB_NO_KEYWORDS, Qfalse, false);
if (RTEST(error))
fiber_switch(next_fiber, -1, &error, RB_NO_KEYWORDS, Qfalse, false);
else
fiber_switch(next_fiber, 1, &value, RB_NO_KEYWORDS, Qfalse, false);
VM_UNREACHABLE(rb_fiber_terminate);
}
VALUE
@ -2436,7 +2435,9 @@ rb_fiber_resume_kw(VALUE fiber_value, int argc, const VALUE *argv, int kw_splat)
rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");
}
return fiber_switch(fiber, argc, argv, kw_splat, fiber_value, false);
VALUE result = fiber_switch(fiber, argc, argv, kw_splat, fiber_value, false);
return result;
}
VALUE

View file

@ -1,21 +0,0 @@
#ifndef COROUTINE_STACK_H
#define COROUTINE_STACK_H 1
/*
* This file is part of the "Coroutine" project and released under the MIT License.
*
* Created by Samuel Williams on 10/11/2020.
* Copyright, 2020, by Samuel Williams.
*/
#include COROUTINE_H
#ifdef COROUTINE_PRIVATE_STACK
#define COROUTINE_STACK_LOCAL(type, name) type *name = ruby_xmalloc(sizeof(type))
#define COROUTINE_STACK_FREE(name) ruby_xfree(name)
#else
#define COROUTINE_STACK_LOCAL(type, name) type name##_local; type * name = &name##_local
#define COROUTINE_STACK_FREE(name)
#endif
#endif /* COROUTINE_STACK_H */

View file

@ -22,6 +22,7 @@ enum {COROUTINE_REGISTERS = 6};
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -23,6 +23,7 @@ enum {COROUTINE_REGISTERS = 8};
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -22,6 +22,7 @@ enum {COROUTINE_REGISTERS = 0xb0 / 8};
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -1,162 +0,0 @@
/*
* This file is part of the "Coroutine" project and released under the MIT License.
*
* Created by Samuel Williams on 24/6/2019.
* Copyright, 2019, by Samuel Williams.
*/
#include "Context.h"
#include <stdint.h>
// http://gcc.gnu.org/onlinedocs/gcc/Alternate-Keywords.html
#ifndef __GNUC__
#define __asm__ asm
#endif
#if defined(__sparc)
__attribute__((noinline))
// https://marc.info/?l=linux-sparc&m=131914569320660&w=2
static void coroutine_flush_register_windows(void) {
__asm__
#ifdef __GNUC__
__volatile__
#endif
#if defined(__sparcv9) || defined(__sparc_v9__) || defined(__arch64__)
#ifdef __GNUC__
("flushw" : : : "%o7")
#else
("flushw")
#endif
#else
("ta 0x03")
#endif
;
}
#else
static void coroutine_flush_register_windows(void) {}
#endif
__attribute__((noinline))
void *coroutine_stack_pointer(void) {
return (void*)(
(char*)__builtin_frame_address(0)
);
}
// Save the current stack to a private area. It is likely that when restoring the stack, this stack frame will be incomplete. But that is acceptable since the previous stack frame which called `setjmp` should be correctly restored.
__attribute__((noinline))
int coroutine_save_stack_1(struct coroutine_context * context) {
assert(context->stack);
assert(context->base);
void *stack_pointer = coroutine_stack_pointer();
// At this point, you may need to ensure on architectures that use register windows, that all registers are flushed to the stack, otherwise the copy of the stack will not contain the valid registers:
coroutine_flush_register_windows();
// Save stack to private area:
if (stack_pointer < context->base) {
size_t size = (char*)context->base - (char*)stack_pointer;
assert(size <= context->size);
memcpy(context->stack, stack_pointer, size);
context->used = size;
} else {
size_t size = (char*)stack_pointer - (char*)context->base;
assert(size <= context->size);
memcpy(context->stack, context->base, size);
context->used = size;
}
// Initialized:
return 0;
}
// Copy the current stack to a private memory buffer.
int coroutine_save_stack(struct coroutine_context * context) {
if (_setjmp(context->state)) {
// Restored.
return 1;
}
// We need to invoke the memory copy from one stack frame deeper than the one that calls setjmp. That is because if you don't do this, the setjmp might be restored into an invalid stack frame (truncated, etc):
return coroutine_save_stack_1(context);
}
__attribute__((noreturn, noinline))
void coroutine_restore_stack_padded(struct coroutine_context *context, void * buffer) {
void *stack_pointer = coroutine_stack_pointer();
assert(context->base);
// At this point, you may need to ensure on architectures that use register windows, that all registers are flushed to the stack, otherwise when we copy in the new stack, the registers would not be updated:
coroutine_flush_register_windows();
// Restore stack from private area:
if (stack_pointer < context->base) {
void * bottom = (char*)context->base - context->used;
assert(bottom > stack_pointer);
memcpy(bottom, context->stack, context->used);
} else {
void * top = (char*)context->base + context->used;
assert(top < stack_pointer);
memcpy(context->base, context->stack, context->used);
}
// Restore registers. The `buffer` is to force the compiler NOT to elide the buffer and `alloca`:
_longjmp(context->state, (int)(1 | (intptr_t)buffer));
}
// In order to swap between coroutines, we need to swap the stack and registers.
// `setjmp` and `longjmp` are able to swap registers, but what about swapping stacks? You can use `memcpy` to copy the current stack to a private area and `memcpy` to copy the private stack of the next coroutine to the main stack.
// But if the stack you are copying into the main stack is bigger than the currently executing stack, the `memcpy` will clobber the current stack frame (including the context argument). So we use `alloca` to push the current stack frame *beyond* the stack we are about to copy in. This ensures the current stack frame in `coroutine_restore_stack_padded` remains valid for calling `longjmp`.
__attribute__((noreturn))
void coroutine_restore_stack(struct coroutine_context *context) {
void *stack_pointer = coroutine_stack_pointer();
void *buffer = NULL;
// We must ensure that the next stack frame is BEYOND the stack we are restoring:
if (stack_pointer < context->base) {
intptr_t offset = (intptr_t)stack_pointer - ((intptr_t)context->base - context->used);
if (offset > 0) buffer = alloca(offset);
} else {
intptr_t offset = ((intptr_t)context->base + context->used) - (intptr_t)stack_pointer;
if (offset > 0) buffer = alloca(offset);
}
assert(context->used > 0);
coroutine_restore_stack_padded(context, buffer);
}
struct coroutine_context *coroutine_transfer(struct coroutine_context *current, struct coroutine_context *target)
{
struct coroutine_context *previous = target->from;
// In theory, either this condition holds true, or we should assign the base address to target:
assert(current->base == target->base);
// If you are trying to copy the coroutine to a different thread
// target->base = current->base
target->from = current;
assert(current != target);
// It's possible to come here, even though the current fiber has been terminated. We are never going to return so we don't bother saving the stack.
if (current->stack) {
if (coroutine_save_stack(current) == 0) {
coroutine_restore_stack(target);
}
} else {
coroutine_restore_stack(target);
}
target->from = previous;
return target;
}
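As a point of reference only (not part of this commit), here is a minimal, self-contained C sketch of the setjmp/longjmp hand-off that the removed copy implementation builds on: longjmp restores registers and returns control to the setjmp point, but it does nothing for stack contents, which is why the code above has to memcpy the stack and pad with alloca.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf resume_point;

// A frame below the setjmp point; longjmp abandons it rather than unwinding it.
static void jump_back(void)
{
    longjmp(resume_point, 1);
}

int main(void)
{
    if (setjmp(resume_point) == 0) {
        // First pass: registers captured, descend one frame and jump back.
        jump_back();
    }
    else {
        // Second pass: registers are restored, but deeper stack frames were
        // simply discarded; the copy coroutine has to memcpy stack contents
        // (and pad with alloca) itself in order to switch between whole stacks.
        puts("resumed at setjmp point");
    }
    return 0;
}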

View file

@ -1,98 +0,0 @@
#ifndef COROUTINE_COPY_CONTEXT_H
#define COROUTINE_COPY_CONTEXT_H 1
/*
* This file is part of the "Coroutine" project and released under the MIT License.
*
* Created by Samuel Williams on 27/6/2019.
* Copyright, 2019, by Samuel Williams.
*/
#pragma once
#include <assert.h>
#include <stddef.h>
#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
/* OpenBSD supports alloca, but does not include alloca.h */
#ifndef __OpenBSD__
#include <alloca.h>
#endif
#define COROUTINE __attribute__((noreturn)) void
#ifdef HAVE_STDINT_H
#include <stdint.h>
#if INTPTR_MAX <= INT32_MAX
#define COROUTINE_LIMITED_ADDRESS_SPACE
#endif
#endif
// This is a stack-copying implementation which uses a private stack for each coroutine, including the main one.
#define COROUTINE_PRIVATE_STACK
struct coroutine_context
{
// Private stack:
void *stack;
size_t size, used;
// The top (or bottom) of the currently executing stack:
void *base;
jmp_buf state;
struct coroutine_context *from;
};
typedef COROUTINE(*coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);
int coroutine_save_stack(struct coroutine_context * context);
COROUTINE coroutine_restore_stack(struct coroutine_context *context);
// @param stack The private stack area memory allocation (pointer to lowest address).
// @param size The size of the private stack area.
// @param base A stack pointer to the base of the main stack. On x86 hardware, this is the upper extent of the region that will be copied to the private stack.
static inline void coroutine_initialize_main(struct coroutine_context *context, void *stack, size_t size, void *base) {
assert(stack);
assert(size >= 1024);
context->stack = stack;
context->size = size;
context->used = 0;
assert(base);
context->base = base;
context->from = NULL;
}
// @param start The start function to invoke.
static inline void coroutine_initialize(
struct coroutine_context *context,
coroutine_start start,
void *stack,
size_t size,
void *base
) {
assert(start);
coroutine_initialize_main(context, stack, size, base);
if (coroutine_save_stack(context)) {
start(context->from, context);
}
}
struct coroutine_context *coroutine_transfer(struct coroutine_context *current, register struct coroutine_context *target);
static inline void coroutine_destroy(struct coroutine_context *context)
{
context->stack = NULL;
context->size = 0;
context->from = NULL;
}
#endif /* COROUTINE_COPY_CONTEXT_H */

View file

@ -26,6 +26,7 @@ struct coroutine_context
emscripten_fiber_t state;
coroutine_start entry_func;
struct coroutine_context * from;
void *argument;
};
COROUTINE coroutine_trampoline(void * _context);

View file

@ -19,6 +19,7 @@ enum {
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

coroutine/pthread/Context.c (new file) | 268 changes | View file

@ -0,0 +1,268 @@
/*
* This file is part of the "Coroutine" project and released under the MIT License.
*
* Created by Samuel Williams on 24/6/2021.
* Copyright, 2021, by Samuel Williams.
*/
#include "Context.h"
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
static const int DEBUG = 0;
static
int check(const char * message, int result) {
if (result) {
switch (result) {
case EDEADLK:
if (DEBUG) fprintf(stderr, "deadlock detected result=%d errno=%d\n", result, errno);
break;
default:
if (DEBUG) fprintf(stderr, "error detected result=%d errno=%d\n", result, errno);
perror(message);
}
}
assert(result == 0);
return result;
}
void coroutine_initialize_main(struct coroutine_context * context) {
context->id = pthread_self();
check("coroutine_initialize_main:pthread_cond_init",
pthread_cond_init(&context->schedule, NULL)
);
context->shared = (struct coroutine_shared*)malloc(sizeof(struct coroutine_shared));
assert(context->shared);
context->shared->main = context;
context->shared->count = 1;
if (DEBUG) {
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
check("coroutine_initialize_main:pthread_mutex_init",
pthread_mutex_init(&context->shared->guard, &attr)
);
} else {
check("coroutine_initialize_main:pthread_mutex_init",
pthread_mutex_init(&context->shared->guard, NULL)
);
}
}
static
void coroutine_release(struct coroutine_context *context) {
if (context->shared) {
size_t count = (context->shared->count -= 1);
if (count == 0) {
if (DEBUG) fprintf(stderr, "coroutine_release:pthread_mutex_destroy(%p)\n", &context->shared->guard);
pthread_mutex_destroy(&context->shared->guard);
free(context->shared);
}
context->shared = NULL;
if (DEBUG) fprintf(stderr, "coroutine_release:pthread_cond_destroy(%p)\n", &context->schedule);
pthread_cond_destroy(&context->schedule);
}
}
void coroutine_initialize(
struct coroutine_context *context,
coroutine_start start,
void *stack,
size_t size
) {
assert(start && stack && size >= 1024);
// We will create the thread when we first transfer, but save the details now:
context->shared = NULL;
context->start = start;
context->stack = stack;
context->size = size;
}
static
int is_locked(pthread_mutex_t * mutex) {
int result = pthread_mutex_trylock(mutex);
// If we could successfully lock the mutex:
if (result == 0) {
pthread_mutex_unlock(mutex);
// We could lock the mutex, so it wasn't locked:
return 0;
} else {
// Otherwise we couldn't lock it because it's already locked:
return 1;
}
}
static
void coroutine_guard_unlock(void * _context)
{
struct coroutine_context * context = _context;
if (DEBUG) fprintf(stderr, "coroutine_guard_unlock:pthread_mutex_unlock\n");
check("coroutine_guard_unlock:pthread_mutex_unlock",
pthread_mutex_unlock(&context->shared->guard)
);
}
static
void coroutine_wait(struct coroutine_context *context)
{
if (DEBUG) fprintf(stderr, "coroutine_wait:pthread_mutex_lock(guard=%p is_locked=%d)\n", &context->shared->guard, is_locked(&context->shared->guard));
check("coroutine_wait:pthread_mutex_lock",
pthread_mutex_lock(&context->shared->guard)
);
if (DEBUG) fprintf(stderr, "coroutine_wait:pthread_mutex_unlock(guard)\n");
pthread_mutex_unlock(&context->shared->guard);
}
static
void coroutine_trampoline_cleanup(void *_context) {
struct coroutine_context * context = _context;
coroutine_release(context);
}
void * coroutine_trampoline(void * _context)
{
struct coroutine_context * context = _context;
assert(context->shared);
pthread_cleanup_push(coroutine_trampoline_cleanup, context);
coroutine_wait(context);
context->start(context->from, context);
pthread_cleanup_pop(1);
return NULL;
}
static
int coroutine_create_thread(struct coroutine_context *context)
{
int result;
pthread_attr_t attr;
result = pthread_attr_init(&attr);
if (result != 0) {
return result;
}
result = pthread_attr_setstack(&attr, context->stack, (size_t)context->size);
if (result != 0) {
pthread_attr_destroy(&attr);
return result;
}
result = pthread_cond_init(&context->schedule, NULL);
if (result != 0) {
pthread_attr_destroy(&attr);
return result;
}
result = pthread_create(&context->id, &attr, coroutine_trampoline, context);
if (result != 0) {
pthread_attr_destroy(&attr);
if (DEBUG) fprintf(stderr, "coroutine_create_thread:pthread_cond_destroy(%p)\n", &context->schedule);
pthread_cond_destroy(&context->schedule);
return result;
}
context->shared->count += 1;
return result;
}
struct coroutine_context * coroutine_transfer(struct coroutine_context * current, struct coroutine_context * target)
{
assert(current->shared);
struct coroutine_context * previous = target->from;
target->from = current;
if (DEBUG) fprintf(stderr, "coroutine_transfer:pthread_mutex_lock(guard=%p is_locked=%d)\n", &current->shared->guard, is_locked(&current->shared->guard));
pthread_mutex_lock(&current->shared->guard);
pthread_cleanup_push(coroutine_guard_unlock, current);
// First transfer:
if (target->shared == NULL) {
target->shared = current->shared;
if (DEBUG) fprintf(stderr, "coroutine_transfer:coroutine_create_thread...\n");
if (coroutine_create_thread(target)) {
if (DEBUG) fprintf(stderr, "coroutine_transfer:coroutine_create_thread failed\n");
target->shared = NULL;
target->from = previous;
return NULL;
}
} else {
if (DEBUG) fprintf(stderr, "coroutine_transfer:pthread_cond_signal(target)\n");
pthread_cond_signal(&target->schedule);
}
// A side effect of acting upon a cancellation request while in a condition wait is that the mutex is (in effect) re-acquired before calling the first cancellation cleanup handler. If cancelled, pthread_cond_wait immediately invokes cleanup handlers.
if (DEBUG) fprintf(stderr, "coroutine_transfer:pthread_cond_wait(schedule=%p, guard=%p, is_locked=%d)\n", &current->schedule, &current->shared->guard, is_locked(&current->shared->guard));
check("coroutine_transfer:pthread_cond_wait",
pthread_cond_wait(&current->schedule, &current->shared->guard)
);
if (DEBUG) fprintf(stderr, "coroutine_transfer:pthread_cleanup_pop\n");
pthread_cleanup_pop(1);
#ifdef __FreeBSD__
// Apparently required for FreeBSD:
pthread_testcancel();
#endif
target->from = previous;
return target;
}
static
void coroutine_join(struct coroutine_context * context) {
if (DEBUG) fprintf(stderr, "coroutine_join:pthread_cancel\n");
check("coroutine_join:pthread_cancel",
pthread_cancel(context->id)
);
if (DEBUG) fprintf(stderr, "coroutine_join:pthread_join\n");
check("coroutine_join:pthread_join",
pthread_join(context->id, NULL)
);
if (DEBUG) fprintf(stderr, "coroutine_join:pthread_join done\n");
}
void coroutine_destroy(struct coroutine_context * context)
{
if (DEBUG) fprintf(stderr, "coroutine_destroy\n");
assert(context);
// We are already destroyed or never created:
if (context->shared == NULL) return;
if (context == context->shared->main) {
context->shared->main = NULL;
coroutine_release(context);
} else {
coroutine_join(context);
assert(context->shared == NULL);
}
}
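The transfer protocol above (signal the target's condition variable, wait on your own, and cancel plus join on destroy) is normally driven from cont.c. The following is a hypothetical, stand-alone usage sketch, not part of this commit; it assumes coroutine/pthread/Context.h is on the include path, and every other name is made up for the example.

#include "coroutine/pthread/Context.h"

#include <stdio.h>
#include <stdlib.h>

static struct coroutine_context main_context;   // static storage: .from starts out NULL
static struct coroutine_context worker_context;

// COROUTINE expands to plain `void` in the pthread implementation.
static COROUTINE worker_entry(struct coroutine_context *from, struct coroutine_context *self)
{
    // cont.c passes the fiber pointer through the same argument slot.
    const char *message = self->argument;
    printf("worker started: %s\n", message);

    // Hand control back to the caller; this pthread parks in pthread_cond_wait.
    coroutine_transfer(self, from);

    printf("worker resumed\n");

    // Not reached past this call in the example: coroutine_destroy cancels the
    // thread while it waits here, and the cleanup handlers release its state.
    coroutine_transfer(self, from);
}

int main(void)
{
    size_t stack_size = 1024 * 1024;
    void *stack = NULL;

    // pthread_attr_setstack expects a suitably aligned stack, so over-align it.
    if (posix_memalign(&stack, 4096, stack_size) != 0) return 1;

    coroutine_initialize_main(&main_context);
    coroutine_initialize(&worker_context, worker_entry, stack, stack_size);
    worker_context.argument = "hello";

    // The first transfer creates the worker pthread on the supplied stack:
    if (coroutine_transfer(&main_context, &worker_context) == NULL) {
        fprintf(stderr, "coroutine_transfer failed\n");
        return 1;
    }

    // Subsequent transfers only signal the worker's condition variable:
    coroutine_transfer(&main_context, &worker_context);

    coroutine_destroy(&worker_context);  // pthread_cancel + pthread_join
    coroutine_destroy(&main_context);    // drops the last reference to the shared state

    free(stack);
    return 0;
}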

View file

@ -0,0 +1,63 @@
/*
* This file is part of the "Coroutine" project and released under the MIT License.
*
* Created by Samuel Williams on 24/6/2021.
* Copyright, 2021, by Samuel Williams.
*/
#pragma once
#include <assert.h>
#include <stddef.h>
#include <pthread.h>
#define COROUTINE void
#define COROUTINE_PTHREAD_CONTEXT
#ifdef HAVE_STDINT_H
#include <stdint.h>
#if INTPTR_MAX <= INT32_MAX
#define COROUTINE_LIMITED_ADDRESS_SPACE
#endif
#endif
struct coroutine_context;
struct coroutine_shared
{
pthread_mutex_t guard;
struct coroutine_context * main;
size_t count;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);
struct coroutine_context
{
struct coroutine_shared * shared;
coroutine_start start;
void *argument;
void *stack;
size_t size;
pthread_t id;
pthread_cond_t schedule;
struct coroutine_context * from;
};
void coroutine_initialize_main(struct coroutine_context * context);
void coroutine_initialize(
struct coroutine_context *context,
coroutine_start start,
void *stack,
size_t size
);
struct coroutine_context * coroutine_transfer(struct coroutine_context * current, struct coroutine_context * target);
void coroutine_destroy(struct coroutine_context * context);

View file

@ -12,6 +12,7 @@ enum {COROUTINE_REGISTERS = 0xd0 / 8};
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -11,6 +11,7 @@
#if defined(__sun) && !defined(__EXTENSIONS__)
#define __EXTENSIONS__
#endif
#include "Context.h"
void coroutine_trampoline(void * _start, void * _context)

View file

@ -27,6 +27,7 @@ struct coroutine_context
{
ucontext_t state;
struct coroutine_context * from;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -24,6 +24,7 @@ enum {COROUTINE_REGISTERS = 4};
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef void(__fastcall * coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -25,6 +25,7 @@ enum {
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef void(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

View file

@ -23,6 +23,7 @@ enum {COROUTINE_REGISTERS = 4};
struct coroutine_context
{
void **stack_pointer;
void *argument;
};
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self) __attribute__((fastcall));

eval.c | 5 changes | View file

@ -1112,13 +1112,9 @@ rb_protect(VALUE (* proc) (VALUE), VALUE data, int *pstate)
volatile enum ruby_tag_type state;
rb_execution_context_t * volatile ec = GET_EC();
rb_control_frame_t *volatile cfp = ec->cfp;
struct rb_vm_protect_tag protect_tag;
rb_jmpbuf_t org_jmpbuf;
protect_tag.prev = ec->protect_tag;
EC_PUSH_TAG(ec);
ec->protect_tag = &protect_tag;
MEMCPY(&org_jmpbuf, &rb_ec_thread_ptr(ec)->root_jmpbuf, rb_jmpbuf_t, 1);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
SAVE_ROOT_JMPBUF(rb_ec_thread_ptr(ec), result = (*proc) (data));
@ -1127,7 +1123,6 @@ rb_protect(VALUE (* proc) (VALUE), VALUE data, int *pstate)
rb_vm_rewind_cfp(ec, cfp);
}
MEMCPY(&rb_ec_thread_ptr(ec)->root_jmpbuf, &org_jmpbuf, rb_jmpbuf_t, 1);
ec->protect_tag = protect_tag.prev;
EC_POP_TAG();
if (pstate != NULL) *pstate = state;

View file

@ -124,7 +124,7 @@ LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *);
SAVE_ROOT_JMPBUF_AFTER_STMT \
} \
else { \
rb_fiber_start(); \
rb_fiber_start(th->ec->fiber_ptr); \
} while (0)
#define EC_PUSH_TAG(ec) do { \
@ -280,7 +280,7 @@ VALUE rb_make_exception(int argc, const VALUE *argv);
NORETURN(void rb_method_name_error(VALUE, VALUE));
NORETURN(void rb_fiber_start(void));
void rb_fiber_start(rb_fiber_t*);
NORETURN(void rb_print_undef(VALUE, ID, rb_method_visibility_t));
NORETURN(void rb_print_undef_str(VALUE, VALUE));

View file

@ -14,7 +14,6 @@
#include "ruby/internal/config.h"
#include "ruby/fiber/scheduler.h"
#include "coroutine/Stack.h"
#include <ctype.h>
#include <errno.h>
@ -1350,29 +1349,26 @@ rb_process_status_wait(rb_pid_t pid, int flags)
if (result != Qundef) return result;
}
COROUTINE_STACK_LOCAL(struct waitpid_state, w);
struct waitpid_state waitpid_state;
waitpid_state_init(w, pid, flags);
w->ec = GET_EC();
waitpid_state_init(&waitpid_state, pid, flags);
waitpid_state.ec = GET_EC();
if (WAITPID_USE_SIGCHLD) {
waitpid_wait(w);
waitpid_wait(&waitpid_state);
}
else {
waitpid_no_SIGCHLD(w);
waitpid_no_SIGCHLD(&waitpid_state);
}
rb_pid_t ret = w->ret;
int s = w->status, e = w->errnum;
COROUTINE_STACK_FREE(w);
if (waitpid_state.ret == 0) return Qnil;
if (ret == 0) return Qnil;
if (ret > 0 && ruby_nocldwait) {
ret = -1;
e = ECHILD;
if (waitpid_state.ret > 0 && ruby_nocldwait) {
waitpid_state.ret = -1;
waitpid_state.errnum = ECHILD;
}
return rb_process_status_new(ret, s, e);
return rb_process_status_new(waitpid_state.ret, waitpid_state.status, waitpid_state.errnum);
}
/*

View file

@ -35,7 +35,7 @@ class TestFiber < Test::Unit::TestCase
def test_many_fibers
skip 'This is unstable on GitHub Actions --jit-wait. TODO: debug it' if defined?(RubyVM::JIT) && RubyVM::JIT.enabled?
max = 10_000
max = 1000
assert_equal(max, max.times{
Fiber.new{}
})
@ -391,8 +391,7 @@ class TestFiber < Test::Unit::TestCase
Fiber.new {
xpid = fork do
# enough to trigger GC on old root fiber
count = 10000
count = 1000 if /solaris|openbsd/i =~ RUBY_PLATFORM
count = 1000
count.times do
Fiber.new {}.transfer
Fiber.new { Fiber.yield }

View file

@ -1798,23 +1798,24 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
rb_execution_context_t * volatile ec = GET_EC();
volatile int saved_errno = 0;
enum ruby_tag_type state;
COROUTINE_STACK_LOCAL(struct waiting_fd, wfd);
wfd->fd = fd;
wfd->th = rb_ec_thread_ptr(ec);
struct waiting_fd waiting_fd = {
.fd = fd,
.th = rb_ec_thread_ptr(ec)
};
RB_VM_LOCK_ENTER();
{
list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd->wfd_node);
list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
}
RB_VM_LOCK_LEAVE();
EC_PUSH_TAG(ec);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
BLOCKING_REGION(wfd->th, {
val = func(data1);
saved_errno = errno;
}, ubf_select, wfd->th, FALSE);
BLOCKING_REGION(waiting_fd.th, {
val = func(data1);
saved_errno = errno;
}, ubf_select, waiting_fd.th, FALSE);
}
EC_POP_TAG();
@ -1824,13 +1825,12 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
*/
RB_VM_LOCK_ENTER();
{
list_del(&wfd->wfd_node);
COROUTINE_STACK_FREE(wfd);
list_del(&waiting_fd.wfd_node);
}
RB_VM_LOCK_LEAVE();
if (state) {
EC_JUMP_TAG(ec, state);
EC_JUMP_TAG(ec, state);
}
/* TODO: check func() */
RUBY_VM_CHECK_INTS_BLOCKING(ec);

View file

@ -608,7 +608,7 @@ null_func(int i)
/* null */
}
static rb_thread_t *
rb_thread_t *
ruby_thread_from_native(void)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
@ -618,7 +618,7 @@ ruby_thread_from_native(void)
#endif
}
static int
int
ruby_thread_set_native(rb_thread_t *th)
{
if (th && th->ec) {

View file

@ -1,6 +1,5 @@
/* included by thread.c */
#include "ccan/list/list.h"
#include "coroutine/Stack.h"
static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;
@ -277,12 +276,10 @@ call_rb_fiber_scheduler_block(VALUE mutex)
}
static VALUE
delete_from_waitq(VALUE v)
delete_from_waitq(VALUE value)
{
struct sync_waiter *w = (void *)v;
list_del(&w->node);
COROUTINE_STACK_FREE(w);
struct sync_waiter *sync_waiter = (void *)value;
list_del(&sync_waiter->node);
return Qnil;
}
@ -309,14 +306,15 @@ do_mutex_lock(VALUE self, int interruptible_p)
while (mutex->fiber != fiber) {
VALUE scheduler = rb_fiber_scheduler_current();
if (scheduler != Qnil) {
COROUTINE_STACK_LOCAL(struct sync_waiter, w);
w->self = self;
w->th = th;
w->fiber = fiber;
struct sync_waiter sync_waiter = {
.self = self,
.th = th,
.fiber = fiber
};
list_add_tail(&mutex->waitq, &w->node);
list_add_tail(&mutex->waitq, &sync_waiter.node);
rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)w);
rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
if (!mutex->fiber) {
mutex->fiber = fiber;
@ -341,18 +339,17 @@ do_mutex_lock(VALUE self, int interruptible_p)
patrol_thread = th;
}
COROUTINE_STACK_LOCAL(struct sync_waiter, w);
w->self = self;
w->th = th;
w->fiber = fiber;
struct sync_waiter sync_waiter = {
.self = self,
.th = th,
.fiber = fiber
};
list_add_tail(&mutex->waitq, &w->node);
list_add_tail(&mutex->waitq, &sync_waiter.node);
native_sleep(th, timeout); /* release GVL */
list_del(&w->node);
COROUTINE_STACK_FREE(w);
list_del(&sync_waiter.node);
if (!mutex->fiber) {
mutex->fiber = fiber;
@ -984,8 +981,6 @@ queue_sleep_done(VALUE p)
list_del(&qw->w.node);
qw->as.q->num_waiting--;
COROUTINE_STACK_FREE(qw);
return Qfalse;
}
@ -997,8 +992,6 @@ szqueue_sleep_done(VALUE p)
list_del(&qw->w.node);
qw->as.sq->num_waiting_push--;
COROUTINE_STACK_FREE(qw);
return Qfalse;
}
@ -1020,17 +1013,15 @@ queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
assert(RARRAY_LEN(q->que) == 0);
assert(queue_closed_p(self) == 0);
COROUTINE_STACK_LOCAL(struct queue_waiter, qw);
struct queue_waiter queue_waiter = {
.w = {.self = self, .th = ec->thread_ptr, .fiber = ec->fiber_ptr},
.as = {.q = q}
};
qw->w.self = self;
qw->w.th = ec->thread_ptr;
qw->w.fiber = ec->fiber_ptr;
list_add_tail(queue_waitq(queue_waiter.as.q), &queue_waiter.w.node);
queue_waiter.as.q->num_waiting++;
qw->as.q = q;
list_add_tail(queue_waitq(qw->as.q), &qw->w.node);
qw->as.q->num_waiting++;
rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)qw);
rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&queue_waiter);
}
}
@ -1263,18 +1254,17 @@ rb_szqueue_push(int argc, VALUE *argv, VALUE self)
}
else {
rb_execution_context_t *ec = GET_EC();
COROUTINE_STACK_LOCAL(struct queue_waiter, qw);
struct queue_waiter queue_waiter = {
.w = {.self = self, .th = ec->thread_ptr, .fiber = ec->fiber_ptr},
.as = {.sq = sq}
};
struct list_head *pushq = szqueue_pushq(sq);
qw->w.self = self;
qw->w.th = ec->thread_ptr;
qw->w.fiber = ec->fiber_ptr;
qw->as.sq = sq;
list_add_tail(pushq, &qw->w.node);
list_add_tail(pushq, &queue_waiter.w.node);
sq->num_waiting_push++;
rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)qw);
rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&queue_waiter);
}
}
@ -1505,13 +1495,14 @@ rb_condvar_wait(int argc, VALUE *argv, VALUE self)
rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);
COROUTINE_STACK_LOCAL(struct sync_waiter, w);
w->self = args.mutex;
w->th = ec->thread_ptr;
w->fiber = ec->fiber_ptr;
struct sync_waiter sync_waiter = {
.self = args.mutex,
.th = ec->thread_ptr,
.fiber = ec->fiber_ptr
};
list_add_tail(&cv->waitq, &w->node);
rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)w);
list_add_tail(&cv->waitq, &sync_waiter.node);
rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
return self;
}

View file

@ -129,13 +129,13 @@ gvl_destroy(rb_global_vm_lock_t *gvl)
CloseHandle(gvl->lock);
}
static rb_thread_t *
rb_thread_t *
ruby_thread_from_native(void)
{
return TlsGetValue(ruby_native_thread_key);
}
static int
int
ruby_thread_set_native(rb_thread_t *th)
{
if (th && th->ec) {

View file

@ -830,10 +830,6 @@ STATIC_ASSERT(rb_vm_tag_buf_end,
offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
sizeof(struct rb_vm_tag));
struct rb_vm_protect_tag {
struct rb_vm_protect_tag *prev;
};
struct rb_unblock_callback {
rb_unblock_function_t *func;
void *arg;
@ -869,7 +865,6 @@ struct rb_execution_context_struct {
rb_control_frame_t *cfp;
struct rb_vm_tag *tag;
struct rb_vm_protect_tag *protect_tag;
/* interrupt flags */
rb_atomic_t interrupt_flag;
@ -1730,6 +1725,8 @@ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_cont
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);