/*
  A thread interface implementation without any system thread.

  Assumptions:
    * There is only a single thread in the ruby process
    * No signal is ever delivered to the ruby process

  Notes:
    * No thread switching in the VM
    * No timer thread because thread switching won't happen
    * No mutex guard because the VM won't be racy
*/
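
/*
  How this backend is selected (a sketch; the exact macro names are an
  assumption and may differ between Ruby versions): thread.c defines the
  guard macro and then textually includes exactly one implementation file,
  along the lines of

      #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
      #include THREAD_IMPL_SRC  // e.g. thread_pthread.c, thread_win32.c,
                                // or this file for THREAD_MODEL=none

  which is why everything below sits behind the #ifdef guard.
*/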

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include <time.h>

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/machine.h"
#endif

#define TIME_QUANTUM_MSEC (100)
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
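
// The time-quantum constants above mirror the other thread backends;
// presumably they are kept so that shared code referring to the scheduling
// quantum still compiles, even though no time slicing happens here
// (an assumption: nothing in this file uses them).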

// Do nothing for the GVL; with only a single thread there is nothing to
// contend for, so acquiring and releasing it are no-ops.
static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
}

static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
}

#define thread_sched_to_dead thread_sched_to_waiting

static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
}

void
rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
{
}

#if 0
static void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
}
#endif

// Do nothing for mutex guard; the VM won't be racy without other threads.
void
rb_native_mutex_lock(rb_nativethread_lock_t *lock)
{
}

void
rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
{
}

int
rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
{
    return 0; // the lock is always free, so trylock always succeeds
}

void
rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
{
}

void
rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
{
}

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
}

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
}

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
}

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
}
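
// Note on the condition variables: with a single thread nothing could ever
// signal a genuinely blocked wait, so blocking here would deadlock.
// Presumably callers in this configuration never depend on a real wait,
// which is what makes the empty bodies safe (an inference from the
// assumptions at the top of this file).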

// The only thread in the process
static rb_thread_t *ruby_native_thread;

rb_thread_t *
ruby_thread_from_native(void)
{
    return ruby_native_thread;
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
    ruby_native_thread = th;
    return 1; // always succeeds
}
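
// In the pthread backend this thread-to-native association is kept in
// thread-local storage; here a single global suffices because there is
// exactly one thread, and the operation cannot fail, hence the
// unconditional 1.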

void
Init_native_thread(rb_thread_t *main_th)
{
    // no TLS setup and no thread id setup
    ruby_thread_set_native(main_th);
}

void
ruby_mn_threads_params(void)
{
}

// local_in_parent_frame is the address of a local variable in the caller's
// frame; other backends use it to bound the machine stack for conservative
// GC scanning [Bug #20001]. It is unused here: on bare wasm the stack base
// comes from the runtime instead.
static int
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
    th->ec->machine.stack_start = (VALUE *)rb_wasm_stack_get_base();
#endif
    return 0; // success
}
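
// rb_wasm_stack_get_base() presumably reports the base of the linear-memory
// C stack (see wasm/machine.h), giving the GC a correct bound for scanning
// where the usual local-variable address trick is unavailable (an assumption
// based on the name; the header has the authoritative story).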

static int
native_thread_create(rb_thread_t *th)
{
    // No system threads can be created here, so mark the thread dead, drop
    // it from its ractor, and raise NotImplementedError.
    th->status = THREAD_KILLED;
    rb_ractor_living_threads_remove(th->ractor, th);
    rb_notimplement();
}
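
// Observable behavior at the Ruby level (a sketch of the expected
// consequence):
//
//     Thread.new { }   #=> raises NotImplementedError
//
// while the main thread keeps working normally.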

// Do nothing for UBF (unblocking function) handling: there is no other
// thread that could need unblocking.
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0

inline static void
ubf_wakeup_all_threads(void)
{
    return;
}

inline static int
ubf_threads_empty(void)
{
    return 1; // true
}

inline static void
ubf_list_atfork(void)
{
}

inline static void
ubf_timer_disarm(void)
{
}

// No timer thread because thread switching won't happen
#define TIMER_THREAD_CREATED_P() (1)

inline static void
rb_thread_create_timer_thread(void)
{
}

void
rb_thread_wakeup_timer_thread(int sig)
{
}

inline static int
native_stop_timer_thread(void)
{
    return 1; // success
}

inline static void
native_reset_timer_thread(void)
{
}

// Do nothing for thread naming
inline static void
native_set_thread_name(rb_thread_t *th)
{
}

inline static void
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
{
}

// Don't expose a native thread id for now, to keep this interface agnostic
// to the underlying system's thread API
#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0

// No reserved fd, since there is no timer thread to pipe to
int
rb_reserved_fd_p(int fd)
{
    return 0; // not reserved
}

// Don't expose native thread info either, for the same reason
rb_nativethread_id_t
rb_nativethread_self(void)
{
    return NULL;
}

// Do nothing for sigwait because of the no-signal assumption
// Q(katei): is this correct description?
int
rb_sigwait_fd_get(const rb_thread_t *th)
{
    return -1; // no sigwait fd exists
}

NORETURN(void rb_sigwait_fd_put(rb_thread_t *, int));
void
rb_sigwait_fd_put(rb_thread_t *th, int fd)
{
    rb_bug("rb_sigwait_fd_put is not implemented and should never be called");
}

NORETURN(void rb_sigwait_sleep(const rb_thread_t *, int, const rb_hrtime_t *));
void
rb_sigwait_sleep(const rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *rel)
{
    rb_bug("rb_sigwait_sleep is not implemented and should never be called");
}

static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    // The no-signal assumption allows an uninterruptible sleep: nothing can
    // arrive that would need to cut the sleep short.
    struct timespec ts;
    (void)clock_nanosleep(CLOCK_REALTIME, 0, rb_hrtime2timespec(&ts, rel), NULL);
}
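
// The return value is deliberately discarded: clock_nanosleep(2) reports an
// early wakeup via EINTR, and under the no-signal assumption that case
// cannot occur, so there is nothing to retry (and NULL is passed for the
// remaining-time argument for the same reason).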

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}
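
// A plain rb_fd_select() is enough here: in the multi-threaded backends this
// wrapper also has to make the select interruptible (via ubf and the GVL),
// but with one thread and no signals nobody can interrupt it, so delegating
// directly is safe under the assumptions stated at the top of this file.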

static bool
th_has_dedicated_nt(const rb_thread_t *th)
{
    return true;
}

void
rb_add_running_thread(rb_thread_t *th)
{
    // do nothing
}

void
rb_del_running_thread(rb_thread_t *th)
{
    // do nothing
}

void
rb_threadptr_sched_free(rb_thread_t *th)
{
    // do nothing
}

void
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
{
    // do nothing
}

void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    // do nothing
}

void
rb_threadptr_remove(rb_thread_t *th)
{
    // do nothing
}

void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    // do nothing
}

bool
rb_thread_lock_native_thread(void)
{
    return false;
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */