pass `th` to `thread_sched_to_waiting()`

for future extension
This commit is contained in:
Koichi Sasada 2023-03-31 17:57:25 +09:00
Родитель 4c0f82eb5b
Коммит f803bcfc87
4 изменённых файлов: 15 добавлений и 15 удалений

Просмотреть файл

@@ -174,10 +174,10 @@ static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_regio
#define THREAD_BLOCKING_BEGIN(th) do { \ #define THREAD_BLOCKING_BEGIN(th) do { \
struct rb_thread_sched * const sched = TH_SCHED(th); \ struct rb_thread_sched * const sched = TH_SCHED(th); \
RB_VM_SAVE_MACHINE_CONTEXT(th); \ RB_VM_SAVE_MACHINE_CONTEXT(th); \
thread_sched_to_waiting(sched); thread_sched_to_waiting((sched), (th));
#define THREAD_BLOCKING_END(th) \ #define THREAD_BLOCKING_END(th) \
thread_sched_to_running(sched, th); \ thread_sched_to_running((sched), (th)); \
rb_ractor_thread_switch(th->ractor, th); \ rb_ractor_thread_switch(th->ractor, th); \
} while(0) } while(0)
@@ -778,12 +778,12 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
// after rb_ractor_living_threads_remove() // after rb_ractor_living_threads_remove()
// GC will happen anytime and this ractor can be collected (and destroy GVL). // GC will happen anytime and this ractor can be collected (and destroy GVL).
// So gvl_release() should be before it. // So gvl_release() should be before it.
thread_sched_to_dead(TH_SCHED(th)); thread_sched_to_dead(TH_SCHED(th), th);
rb_ractor_living_threads_remove(th->ractor, th); rb_ractor_living_threads_remove(th->ractor, th);
} }
else { else {
rb_ractor_living_threads_remove(th->ractor, th); rb_ractor_living_threads_remove(th->ractor, th);
thread_sched_to_dead(TH_SCHED(th)); thread_sched_to_dead(TH_SCHED(th), th);
} }
return 0; return 0;
@@ -1480,7 +1480,7 @@ blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
RUBY_DEBUG_LOG(""); RUBY_DEBUG_LOG("");
RB_VM_SAVE_MACHINE_CONTEXT(th); RB_VM_SAVE_MACHINE_CONTEXT(th);
thread_sched_to_waiting(TH_SCHED(th)); thread_sched_to_waiting(TH_SCHED(th), th);
return TRUE; return TRUE;
} }
else { else {

Просмотреть файл

@@ -30,7 +30,7 @@ thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
} }
static void static void
thread_sched_to_waiting(struct rb_thread_sched *sched) thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{ {
} }

Просмотреть файл

@@ -521,7 +521,7 @@ thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
} }
static rb_thread_t * static rb_thread_t *
thread_sched_to_waiting_common(struct rb_thread_sched *sched) thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
{ {
rb_thread_t *next; rb_thread_t *next;
sched->running = NULL; sched->running = NULL;
@@ -532,19 +532,19 @@ thread_sched_to_waiting_common(struct rb_thread_sched *sched)
} }
static void static void
thread_sched_to_waiting(struct rb_thread_sched *sched) thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{ {
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED); RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);
rb_native_mutex_lock(&sched->lock); rb_native_mutex_lock(&sched->lock);
thread_sched_to_waiting_common(sched); thread_sched_to_waiting_common(sched, th);
rb_native_mutex_unlock(&sched->lock); rb_native_mutex_unlock(&sched->lock);
} }
static void static void
thread_sched_to_dead(struct rb_thread_sched *sched) thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
{ {
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED); RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED);
thread_sched_to_waiting(sched); thread_sched_to_waiting(sched, th);
} }
static void static void
@@ -558,7 +558,7 @@ thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
*/ */
ubf_wakeup_all_threads(); ubf_wakeup_all_threads();
rb_native_mutex_lock(&sched->lock); rb_native_mutex_lock(&sched->lock);
next = thread_sched_to_waiting_common(sched); next = thread_sched_to_waiting_common(sched, th);
/* An another thread is processing GVL yield. */ /* An another thread is processing GVL yield. */
if (UNLIKELY(sched->wait_yield)) { if (UNLIKELY(sched->wait_yield)) {
@@ -2209,7 +2209,7 @@ ubf_ppoll_sleep(void *ignore)
struct rb_thread_sched *sched = TH_SCHED(th); \ struct rb_thread_sched *sched = TH_SCHED(th); \
RB_VM_SAVE_MACHINE_CONTEXT(th); \ RB_VM_SAVE_MACHINE_CONTEXT(th); \
rb_native_mutex_lock(&sched->lock); \ rb_native_mutex_lock(&sched->lock); \
next = thread_sched_to_waiting_common(sched); \ next = thread_sched_to_waiting_common((sched), (th)); \
rb_native_mutex_unlock(&sched->lock); \ rb_native_mutex_unlock(&sched->lock); \
if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \ if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
native_thread_yield(); \ native_thread_yield(); \

Просмотреть файл

@@ -134,7 +134,7 @@ thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
#define thread_sched_to_dead thread_sched_to_waiting #define thread_sched_to_dead thread_sched_to_waiting
static void static void
thread_sched_to_waiting(struct rb_thread_sched *sched) thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{ {
ReleaseMutex(sched->lock); ReleaseMutex(sched->lock);
} }
@@ -142,7 +142,7 @@ thread_sched_to_waiting(struct rb_thread_sched *sched)
static void static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th) thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{ {
thread_sched_to_waiting(sched); thread_sched_to_waiting(sched, th);
native_thread_yield(); native_thread_yield();
thread_sched_to_running(sched, th); thread_sched_to_running(sched, th);
} }