ruby/scheduler.c

/**********************************************************************

  scheduler.c

  $Author$

  Copyright (C) 2020 Samuel Grant Dawson Williams

**********************************************************************/
#include "vm_core.h"
#include "ruby/fiber/scheduler.h"
#include "ruby/io.h"
#include "ruby/io/buffer.h"
#include "internal/thread.h"
static ID id_close;
static ID id_scheduler_close;
static ID id_block;
static ID id_unblock;
static ID id_timeout_after;
static ID id_kernel_sleep;
static ID id_process_wait;
static ID id_io_read, id_io_pread;
static ID id_io_write, id_io_pwrite;
static ID id_io_wait;
static ID id_io_select;
static ID id_io_close;
static ID id_address_resolve;
static ID id_fiber_schedule;
/*
* Document-class: Fiber::Scheduler
*
* This is not an existing class, but documentation of the interface that a Scheduler
* object should comply with in order to be used as an argument to Fiber.scheduler and
* handle non-blocking fibers. See also the "Non-blocking fibers" section in Fiber class
* docs for explanations of some concepts.
*
* Scheduler's behavior and usage are expected to be as follows:
*
* * When execution in a non-blocking Fiber reaches some blocking operation (like
*   sleep, waiting for a process, or non-ready I/O), it calls one of the scheduler's
*   hook methods, listed below.
* * The scheduler somehow registers what the current fiber is waiting on, and yields
*   control to other fibers with Fiber.yield (so the fiber is suspended while it waits,
*   and other fibers in the same thread can run).
* * At the end of the current thread's execution, the scheduler's method
*   #scheduler_close is called.
* * The scheduler then enters a wait loop, checking all the blocked fibers (which it
*   has registered on hook calls) and resuming them when the awaited resource is ready
*   (e.g. I/O ready or sleep time elapsed).
*
* This way concurrent execution will be achieved transparently for every
* individual Fiber's code.
*
* Scheduler implementations are provided by gems, like
* Async[https://github.com/socketry/async].
*
* Hook methods are:
*
* * #io_wait, #io_read, #io_write, #io_pread, #io_pwrite, #io_select, and #io_close
* * #process_wait
* * #kernel_sleep
* * #timeout_after
* * #address_resolve
* * #block and #unblock
* * (the list is expected to grow as Ruby developers add non-blocking support to more methods)
*
* Unless specified otherwise, the hook implementations are mandatory: if a hook is not
* implemented, the methods trying to call it will fail. To provide backward compatibility,
* hooks added in the future will be optional (if a scheduler created for an older Ruby
* version does not implement them, the code which needs the hook will not fail, and will
* just behave in a blocking fashion).
*
* It is also strongly recommended that the scheduler implement the #fiber method, to
* which Fiber.schedule delegates.
*
* A sample _toy_ implementation of the scheduler can be found in Ruby's code, in
* <tt>test/fiber/scheduler.rb</tt>.
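*
* A minimal skeleton, shown here only to illustrate the shape of the interface (the
* class name is arbitrary and the method bodies are left to the implementation), might
* look like:
*
*     class MyScheduler
*       def io_wait(io, events, timeout); end
*       def kernel_sleep(duration = nil); end
*       def block(blocker, timeout = nil); end
*       def unblock(blocker, fiber); end
*       def close; end
*
*       def fiber(&block)
*         Fiber.new(blocking: false, &block).tap(&:resume)
*       end
*     end
*
*     Fiber.set_scheduler(MyScheduler.new)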
*
*/
void
Init_Fiber_Scheduler(void)
{
    id_close = rb_intern_const("close");
    id_scheduler_close = rb_intern_const("scheduler_close");

    id_block = rb_intern_const("block");
    id_unblock = rb_intern_const("unblock");

    id_timeout_after = rb_intern_const("timeout_after");
    id_kernel_sleep = rb_intern_const("kernel_sleep");
    id_process_wait = rb_intern_const("process_wait");

    id_io_read = rb_intern_const("io_read");
    id_io_pread = rb_intern_const("io_pread");
    id_io_write = rb_intern_const("io_write");
    id_io_pwrite = rb_intern_const("io_pwrite");

    id_io_wait = rb_intern_const("io_wait");
    id_io_select = rb_intern_const("io_select");
    id_io_close = rb_intern_const("io_close");

    id_address_resolve = rb_intern_const("address_resolve");

    id_fiber_schedule = rb_intern_const("fiber");

#if 0 /* for RDoc */
    rb_cFiberScheduler = rb_define_class_under(rb_cFiber, "Scheduler", rb_cObject);
    rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_close, 0);
    rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_process_wait, 2);
    rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_io_wait, 3);
    rb_define_method(rb_cFiberScheduler, "io_read", rb_fiber_scheduler_io_read, 4);
    rb_define_method(rb_cFiberScheduler, "io_write", rb_fiber_scheduler_io_write, 4);
    rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_kernel_sleep, 1);
    rb_define_method(rb_cFiberScheduler, "address_resolve", rb_fiber_scheduler_address_resolve, 1);
    rb_define_method(rb_cFiberScheduler, "timeout_after", rb_fiber_scheduler_timeout_after, 3);
    rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_block, 2);
    rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_unblock, 2);
    rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler, -2);
#endif
}

VALUE
rb_fiber_scheduler_get(void)
{
    VM_ASSERT(ruby_thread_has_gvl_p());

    rb_thread_t *thread = GET_THREAD();
    VM_ASSERT(thread);

    return thread->scheduler;
}

static void
verify_interface(VALUE scheduler)
{
    if (!rb_respond_to(scheduler, id_block)) {
        rb_raise(rb_eArgError, "Scheduler must implement #block");
    }

    if (!rb_respond_to(scheduler, id_unblock)) {
        rb_raise(rb_eArgError, "Scheduler must implement #unblock");
    }

    if (!rb_respond_to(scheduler, id_kernel_sleep)) {
        rb_raise(rb_eArgError, "Scheduler must implement #kernel_sleep");
    }

    if (!rb_respond_to(scheduler, id_io_wait)) {
        rb_raise(rb_eArgError, "Scheduler must implement #io_wait");
    }
}

VALUE
rb_fiber_scheduler_set(VALUE scheduler)
{
    VM_ASSERT(ruby_thread_has_gvl_p());

    rb_thread_t *thread = GET_THREAD();
    VM_ASSERT(thread);

    if (scheduler != Qnil) {
        verify_interface(scheduler);
    }

    // We invoke Scheduler#close when setting it to something else, to ensure
    // the previous scheduler runs to completion before changing the scheduler.
    // That way, we do not need to consider interactions, e.g., of a Fiber from
    // the previous scheduler with the new scheduler.
    if (thread->scheduler != Qnil) {
        rb_fiber_scheduler_close(thread->scheduler);
    }

    thread->scheduler = scheduler;

    return thread->scheduler;
}

static VALUE
rb_fiber_scheduler_current_for_threadptr(rb_thread_t *thread)
{
    VM_ASSERT(thread);

    if (thread->blocking == 0) {
        return thread->scheduler;
    }
    else {
        return Qnil;
    }
}

VALUE
rb_fiber_scheduler_current(void)
{
    return rb_fiber_scheduler_current_for_threadptr(GET_THREAD());
}

VALUE rb_fiber_scheduler_current_for_thread(VALUE thread)
{
    return rb_fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread));
}
/*
*
* Document-method: Fiber::Scheduler#close
*
* Called when the current thread exits. The scheduler is expected to implement this
* method in order to allow all waiting fibers to finalize their execution.
*
* The suggested pattern is to implement the main event loop in the #close method.
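*
* A _toy_ sketch of such a loop (assuming the scheduler keeps a <tt>@waiting</tt> Hash
* of fibers to wake-up times, maintained by #kernel_sleep; both the variable and the
* bookkeeping are illustrative assumptions of this example, not part of the interface):
*
*     def close
*       until @waiting.empty?
*         fiber, deadline = @waiting.min_by { |_fiber, time| time }
*         pause = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
*         sleep(pause) if pause > 0
*         @waiting.delete(fiber)
*         fiber.resume if fiber.alive?
*       end
*     end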
*
*/
VALUE
rb_fiber_scheduler_close(VALUE scheduler)
{
    VM_ASSERT(ruby_thread_has_gvl_p());

    VALUE result;

    // The reason for calling `scheduler_close` before calling `close` is for
    // legacy schedulers which implement `close` and expect the user to call
    // it. Subsequently, that method would call `Fiber.set_scheduler(nil)`
    // which should call `scheduler_close`. If it were to call `close`, it
    // would create an infinite loop.

    result = rb_check_funcall(scheduler, id_scheduler_close, 0, NULL);
    if (!UNDEF_P(result)) return result;

    result = rb_check_funcall(scheduler, id_close, 0, NULL);
    if (!UNDEF_P(result)) return result;

    return Qnil;
}

VALUE
rb_fiber_scheduler_make_timeout(struct timeval *timeout)
{
    if (timeout) {
        return rb_float_new((double)timeout->tv_sec + (0.000001f * timeout->tv_usec));
    }

    return Qnil;
}
/*
* Document-method: Fiber::Scheduler#kernel_sleep
* call-seq: kernel_sleep(duration = nil)
*
* Invoked by Kernel#sleep and Mutex#sleep and is expected to provide
* an implementation of sleeping in a non-blocking way. The implementation might
* register the current fiber in some list of "which fiber waits until what
* moment", call Fiber.yield to pass control, and then in #close resume
* the fibers whose wait period has elapsed.
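*
* A _toy_ sketch, assuming an illustrative <tt>@waiting</tt> Hash that maps fibers to
* wake-up times (the bookkeeping is an assumption of this example, not part of the
* interface; see the #close sketch above for the other half):
*
*     def kernel_sleep(duration = nil)
*       if duration
*         @waiting[Fiber.current] = Process.clock_gettime(Process::CLOCK_MONOTONIC) + duration
*       end
*
*       Fiber.yield
*     end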
*
*/
VALUE
rb_fiber_scheduler_kernel_sleep(VALUE scheduler, VALUE timeout)
{
    return rb_funcall(scheduler, id_kernel_sleep, 1, timeout);
}

VALUE
rb_fiber_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_funcallv(scheduler, id_kernel_sleep, argc, argv);
}
#if 0
/*
* Document-method: Fiber::Scheduler#timeout_after
* call-seq: timeout_after(duration, exception_class, *exception_arguments, &block) -> result of block
*
* Invoked by Timeout.timeout to execute the given +block+ within the given
* +duration+. It can also be invoked directly by the scheduler or user code.
*
* Attempt to limit the execution time of a given +block+ to the given
* +duration+ if possible. When a non-blocking operation causes the +block+'s
* execution time to exceed the specified +duration+, that non-blocking
* operation should be interrupted by raising the specified +exception_class+
* constructed with the given +exception_arguments+.
*
* General execution timeouts are often considered risky. This implementation
* will only interrupt non-blocking operations. This is by design because it's
* expected that non-blocking operations can fail for a variety of
* unpredictable reasons, so applications should already be robust in handling
* these conditions and by implication timeouts.
*
* However, as a result of this design, if the +block+ does not invoke any
* non-blocking operations, it will be impossible to interrupt it. If you
* desire to provide predictable points for timeouts, consider adding
* +sleep(0)+.
*
* If the block is executed successfully, its result will be returned.
*
* The exception will typically be raised using Fiber#raise.
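*
* A _toy_ sketch; +register_timer+ and +cancel_timer+ are hypothetical helpers of the
* scheduler's own event loop, not part of the interface:
*
*     def timeout_after(duration, exception_class, *exception_arguments)
*       fiber = Fiber.current
*
*       # Hypothetical: ask the event loop to raise into the fiber after the duration.
*       timer = register_timer(duration) do
*         fiber.raise(exception_class, *exception_arguments) if fiber.alive?
*       end
*
*       begin
*         yield duration
*       ensure
*         cancel_timer(timer)
*       end
*     end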
*/
VALUE
rb_fiber_scheduler_timeout_after(VALUE scheduler, VALUE timeout, VALUE exception, VALUE message)
{
    VALUE arguments[] = {
        timeout, exception, message
    };

    return rb_check_funcall(scheduler, id_timeout_after, 3, arguments);
}

VALUE
rb_fiber_scheduler_timeout_afterv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_check_funcall(scheduler, id_timeout_after, argc, argv);
}
#endif
/*
* Document-method: Fiber::Scheduler#process_wait
* call-seq: process_wait(pid, flags)
*
* Invoked by Process::Status.wait in order to wait for a specified process.
* See that method's documentation for a description of the arguments.
*
* Suggested minimal implementation:
*
*     Thread.new do
*       Process::Status.wait(pid, flags)
*     end.value
*
* This hook is optional: if it is not present in the current scheduler,
* Process::Status.wait will behave as a blocking method.
*
* Expected to return a Process::Status instance.
*/
VALUE
rb_fiber_scheduler_process_wait(VALUE scheduler, rb_pid_t pid, int flags)
{
    VALUE arguments[] = {
        PIDT2NUM(pid), RB_INT2NUM(flags)
    };

    return rb_check_funcall(scheduler, id_process_wait, 2, arguments);
}
/*
* Document-method: Fiber::Scheduler#block
* call-seq: block(blocker, timeout = nil)
*
* Invoked by methods like Thread.join, and by Mutex, to signify that the current
* Fiber is blocked until further notice (e.g. #unblock) or until +timeout+ has
* elapsed.
*
* +blocker+ is what we are waiting on, and is informational only (for debugging and
* logging). There is no guarantee about its value.
*
* Expected to return a boolean, specifying whether the blocking operation was
* successful or not.
*/
VALUE
rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
{
    return rb_funcall(scheduler, id_block, 2, blocker, timeout);
}
/*
* Document-method: Fiber::Scheduler#unblock
* call-seq: unblock(blocker, fiber)
*
* Invoked to wake up a Fiber previously blocked with #block (for example, Mutex#lock
* calls #block and Mutex#unlock calls #unblock). The scheduler should use
* the +fiber+ parameter to understand which fiber is unblocked.
*
* +blocker+ is what was awaited, but it is informational only (for debugging
* and logging), and it is not guaranteed to be the same value as the +blocker+
* passed to #block.
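*
* A _toy_ sketch of a matching #block/#unblock pair (timeout handling omitted); the
* <tt>@blocked</tt> set and the <tt>@ready</tt> queue drained by the event loop are
* illustrative assumptions of this example, not part of the interface:
*
*     def block(blocker, timeout = nil)
*       @blocked << Fiber.current
*       Fiber.yield
*       true
*     end
*
*     def unblock(blocker, fiber)
*       @blocked.delete(fiber)
*       @ready << fiber # resumed later by the event loop
*     end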
*
*/
VALUE
rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
{
    VM_ASSERT(rb_obj_is_fiber(fiber));

    return rb_funcall(scheduler, id_unblock, 2, blocker, fiber);
}
/*
* Document-method: Fiber::Scheduler#io_wait
* call-seq: io_wait(io, events, timeout)
*
* Invoked by IO#wait, IO#wait_readable, IO#wait_writable to ask whether the
* specified descriptor is ready for the specified events within
* the specified +timeout+.
*
* +events+ is a bit mask of <tt>IO::READABLE</tt>, <tt>IO::WRITABLE</tt>, and
* <tt>IO::PRIORITY</tt>.
*
* A suggested implementation should register which Fiber is waiting for which
* resources and immediately call Fiber.yield to pass control to other
* fibers. Then, in the #close method, the scheduler might dispatch all the
* I/O resources to the fibers waiting for them.
*
* Expected to return the subset of events that are ready immediately.
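*
* A _toy_ sketch of that approach; the <tt>@readers</tt>/<tt>@writers</tt> registries
* consumed by the event loop are illustrative assumptions of this example:
*
*     def io_wait(io, events, timeout)
*       @readers[io] = Fiber.current if events.anybits?(IO::READABLE)
*       @writers[io] = Fiber.current if events.anybits?(IO::WRITABLE)
*
*       Fiber.yield # resumed by the event loop with the ready events
*     end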
*
*/
VALUE
rb_fiber_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)
{
    return rb_funcall(scheduler, id_io_wait, 3, io, events, timeout);
}

VALUE
rb_fiber_scheduler_io_wait_readable(VALUE scheduler, VALUE io)
{
    return rb_fiber_scheduler_io_wait(scheduler, io, RB_UINT2NUM(RUBY_IO_READABLE), rb_io_timeout(io));
}

VALUE
rb_fiber_scheduler_io_wait_writable(VALUE scheduler, VALUE io)
{
    return rb_fiber_scheduler_io_wait(scheduler, io, RB_UINT2NUM(RUBY_IO_WRITABLE), rb_io_timeout(io));
}
/*
* Document-method: Fiber::Scheduler#io_select
* call-seq: io_select(readables, writables, exceptables, timeout)
*
* Invoked by IO.select to ask whether the specified descriptors are ready for
* specified events within the specified +timeout+.
*
* Expected to return a 3-tuple of Arrays of IOs that are ready.
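*
* One simplistic implementation (mirroring the suggestion for #process_wait) is to run
* the blocking IO.select call on a separate thread and let the calling fiber wait on
* its result:
*
*     def io_select(readables, writables, exceptables, timeout)
*       Thread.new do
*         IO.select(readables, writables, exceptables, timeout)
*       end.value
*     end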
*
*/
VALUE rb_fiber_scheduler_io_select(VALUE scheduler, VALUE readables, VALUE writables, VALUE exceptables, VALUE timeout)
{
    VALUE arguments[] = {
        readables, writables, exceptables, timeout
    };

    return rb_fiber_scheduler_io_selectv(scheduler, 4, arguments);
}

VALUE rb_fiber_scheduler_io_selectv(VALUE scheduler, int argc, VALUE *argv)
{
    // I wondered about extracting argv, and checking if there is only a single
    // IO instance, and instead calling `io_wait`. However, it would require a
    // decent amount of work and it would be hard to preserve the exact
    // semantics of IO.select.

    return rb_check_funcall(scheduler, id_io_select, argc, argv);
}
/*
* Document-method: Fiber::Scheduler#io_read
* call-seq: io_read(io, buffer, length, offset) -> read length or -errno
*
* Invoked by IO#read to read +length+ bytes from +io+ into a specified
* +buffer+ (see IO::Buffer).
*
* The +length+ argument is the "minimum length to be read".
* If the IO buffer size is 8KiB, but the +length+ is +1024+ (1KiB), up to
* 8KiB might be read, but at least 1KiB will be.
* Generally, the only case where less data than +length+ will be read is if
* there is an error reading the data.
*
* Specifying a +length+ of 0 is valid and means try reading at least once
* and return any available data.
*
* Suggested implementation should try to read from +io+ in a non-blocking
* manner and call #io_wait if the +io+ is not ready (which will yield control
* to other fibers).
*
* See IO::Buffer for an interface available to return data.
*
* Expected to return number of bytes read, or, in case of an error, <tt>-errno</tt>
* (negated number corresponding to system's error code).
*
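* A _toy_ sketch of the suggested approach (error handling is minimal and the
* bookkeeping is illustrative only):
*
*     def io_read(io, buffer, length, offset)
*       total = 0
*
*       loop do
*         result = io.read_nonblock(buffer.size - offset, exception: false)
*
*         case result
*         when :wait_readable
*           io_wait(io, IO::READABLE, nil)
*         when nil # EOF
*           return total
*         else
*           buffer.set_string(result, offset)
*           offset += result.bytesize
*           total += result.bytesize
*           return total if total >= length
*         end
*       end
*     rescue SystemCallError => error
*       -error.errno
*     end
*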
* The method should be considered _experimental_.
*/
VALUE
rb_fiber_scheduler_io_read(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_read, 4, arguments);
}

VALUE
rb_fiber_scheduler_io_pread(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_pread, 5, arguments);
}
/*
* Document-method: Fiber::Scheduler#io_write
* call-seq: io_write(io, buffer, length, offset) -> written length or -errno
*
* Invoked by IO#write to write +length+ bytes to +io+ from
* a specified +buffer+ (see IO::Buffer).
*
* The +length+ argument is the "(minimum) length to be written".
* If the IO buffer size is 8KiB, but the +length+ specified is 1024 (1KiB),
* at most 8KiB will be written, but at least 1KiB will be.
* Generally, the only case where less data than +length+ will be written is if
* there is an error writing the data.
*
* Specifying a +length+ of 0 is valid and means try writing at least once,
* as much data as possible.
*
* Suggested implementation should try to write to +io+ in a non-blocking
* manner and call #io_wait if the +io+ is not ready (which will yield control
* to other fibers).
*
* See IO::Buffer for an interface available to get data from buffer efficiently.
*
* Expected to return number of bytes written, or, in case of an error, <tt>-errno</tt>
* (negated number corresponding to system's error code).
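*
* A _toy_ sketch, symmetrical to the #io_read example above:
*
*     def io_write(io, buffer, length, offset)
*       total = 0
*
*       loop do
*         result = io.write_nonblock(buffer.get_string(offset), exception: false)
*
*         if result == :wait_writable
*           io_wait(io, IO::WRITABLE, nil)
*         else
*           offset += result
*           total += result
*           return total if total >= length
*         end
*       end
*     rescue SystemCallError => error
*       -error.errno
*     end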
*
* The method should be considered _experimental_.
*/
VALUE
rb_fiber_scheduler_io_write(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_write, 4, arguments);
}

VALUE
rb_fiber_scheduler_io_pwrite(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_pwrite, 5, arguments);
}

VALUE
rb_fiber_scheduler_io_read_memory(VALUE scheduler, VALUE io, void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);

    VALUE result = rb_fiber_scheduler_io_read(scheduler, io, buffer, length, 0);

    rb_io_buffer_unlock(buffer);
    rb_io_buffer_free(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_write_memory(VALUE scheduler, VALUE io, const void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);

    VALUE result = rb_fiber_scheduler_io_write(scheduler, io, buffer, length, 0);

    rb_io_buffer_unlock(buffer);
    rb_io_buffer_free(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_close(VALUE scheduler, VALUE io)
{
    VALUE arguments[] = {io};

    return rb_check_funcall(scheduler, id_io_close, 1, arguments);
}
/*
* Document-method: Fiber::Scheduler#address_resolve
* call-seq: address_resolve(hostname) -> array_of_strings or nil
*
* Invoked by any method that performs a non-reverse DNS lookup. The most
* notable method is Addrinfo.getaddrinfo, but there are many others.
*
* The method is expected to return an array of strings corresponding to the IP
* addresses the +hostname+ resolves to, or +nil+ if it cannot be resolved.
*
* A fairly exhaustive list of all possible call-sites:
*
* - Addrinfo.getaddrinfo
* - Addrinfo.tcp
* - Addrinfo.udp
* - Addrinfo.ip
* - Addrinfo.new
* - Addrinfo.marshal_load
* - SOCKSSocket.new
* - TCPServer.new
* - TCPSocket.new
* - IPSocket.getaddress
* - TCPSocket.gethostbyname
* - UDPSocket#connect
* - UDPSocket#bind
* - UDPSocket#send
* - Socket.getaddrinfo
* - Socket.gethostbyname
* - Socket.pack_sockaddr_in
* - Socket.sockaddr_in
* - Socket.unpack_sockaddr_in
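*
* A minimal sketch that simply delegates the blocking lookup to a separate thread
* (similar to the suggestion for #process_wait above; error handling is simplistic):
*
*     def address_resolve(hostname)
*       Thread.new do
*         # Addrinfo is provided by the standard "socket" library.
*         Addrinfo.getaddrinfo(hostname, nil).map(&:ip_address).uniq
*       rescue SocketError
*         nil
*       end.value
*     end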
*/
VALUE
rb_fiber_scheduler_address_resolve(VALUE scheduler, VALUE hostname)
{
    VALUE arguments[] = {
        hostname
    };

    return rb_check_funcall(scheduler, id_address_resolve, 1, arguments);
}
/*
* Document-method: Fiber::Scheduler#fiber
* call-seq: fiber(&block)
*
* Implementation of Fiber.schedule. The method is <em>expected</em> to immediately
* run the given block of code in a separate non-blocking fiber, and to return that Fiber.
*
* Minimal suggested implementation is:
*
*     def fiber(&block)
*       fiber = Fiber.new(blocking: false, &block)
*       fiber.resume
*       fiber
*     end
*/
VALUE
rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
{
    return rb_funcall_passing_block_kw(scheduler, id_fiber_schedule, argc, argv, kw_splat);
}