From 03d21a4fb099da7c52e6591e17704c297871b7db Mon Sep 17 00:00:00 2001
From: Koichi Sasada
Date: Fri, 22 Apr 2022 21:19:03 +0900
Subject: [PATCH] introduce struct `rb_native_thread`

`rb_thread_t` contained `native_thread_data_t` to represent
thread-implementation-dependent data. This patch separates that data
out, renames it `rb_native_thread`, and points to it from
`rb_thread_t`. Now, 1 Ruby thread (`rb_thread_t`) has 1 native thread
(`rb_native_thread`).
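In short, the implementation-dependent fields move out of `rb_thread_t`
and behind a pointer. A condensed sketch of the layout change (most
members elided here; the diff below has the full definitions):

    // before: native data embedded in the Ruby thread
    typedef struct rb_thread_struct {
        rb_nativethread_id_t thread_id;          // native handle
        native_thread_data_t native_thread_data; // condvars, list nodes
        // ...
    } rb_thread_t;

    // after: one separately allocated native thread per Ruby thread
    struct rb_native_thread {
        rb_nativethread_id_t thread_id; // handle, tid and condvars live here now
        // ...
    };

    typedef struct rb_thread_struct {
        struct rb_native_thread *nt;       // 1:1 with this Ruby thread
        struct rb_thread_sched_item sched; // scheduler queue nodes stay per-Ruby-thread
        // ...
    } rb_thread_t;

The `rb_native_thread` is ZALLOC'ed at thread creation
(`native_thread_create()`, or `Init_BareVM()` for the main thread) and
freed in `thread_free()`.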
---
 thread.c         |   4 +-
 thread_none.c    |   6 +-
 thread_none.h    |   5 +-
 thread_pthread.c | 162 ++++++++++++++++++++++++-----------------------
 thread_pthread.h |  25 +++++++-
 thread_win32.c   |  49 +++++++-------
 thread_win32.h   |   9 ++-
 vm.c             |  13 ++--
 vm_core.h        |  22 +++----
 vm_dump.c        |   4 +-
 10 files changed, 167 insertions(+), 132 deletions(-)

diff --git a/thread.c b/thread.c
index 7881a8c05b..aeb1a6308d 100644
--- a/thread.c
+++ b/thread.c
@@ -339,7 +339,7 @@ rb_thread_s_debug_set(VALUE self, VALUE val)
 #ifndef fill_thread_id_str
 # define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
 # define fill_thread_id_str(th) (void)0
-# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
+# define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
 # define PRI_THREAD_ID "p"
 #endif
 
@@ -3333,7 +3333,7 @@ rb_thread_setname(VALUE thread, VALUE name)
     }
     target_th->name = name;
     if (threadptr_initialized(target_th)) {
-        native_set_another_thread_name(target_th->thread_id, name);
+        native_set_another_thread_name(target_th->nt->thread_id, name);
     }
     return name;
 }
diff --git a/thread_none.c b/thread_none.c
index fb7b9f9a97..18986d3c5b 100644
--- a/thread_none.c
+++ b/thread_none.c
@@ -126,11 +126,11 @@ ruby_thread_set_native(rb_thread_t *th)
 }
 
 void
-Init_native_thread(rb_thread_t *th)
+Init_native_thread(rb_thread_t *main_th)
 {
     // no TLS setup and no thread id setup
-    ruby_thread_set_native(th);
-    fill_thread_id_str(th);
+    ruby_thread_set_native(main_th);
+    fill_thread_id_str(main_th);
 }
 
 static void
diff --git a/thread_none.h b/thread_none.h
index 3956fbfe7f..89f64667f0 100644
--- a/thread_none.h
+++ b/thread_none.h
@@ -8,8 +8,11 @@
 // based implementation in vm.c
 #define RB_THREAD_LOCAL_SPECIFIER
 
-typedef struct native_thread_data_struct {} native_thread_data_t;
+struct rb_native_thread {
+    void *thread_id; // NULL
+};
 
+struct rb_thread_sched_item {};
 struct rb_thread_sched {};
 
 RUBY_EXTERN struct rb_execution_context_struct *ruby_current_ec;
diff --git a/thread_pthread.c b/thread_pthread.c
index 0d2d7c41b0..c1ec2067f5 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -225,15 +225,17 @@ static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *
 static int
 designate_timer_thread(struct rb_thread_sched *sched)
 {
-    native_thread_data_t *last;
+    rb_thread_t *last;
 
-    last = ccan_list_tail(&sched->readyq, native_thread_data_t, node.readyq);
+    last = ccan_list_tail(&sched->readyq, rb_thread_t, sched.node.readyq);
 
     if (last) {
-        rb_native_cond_signal(&last->cond.readyq);
+        rb_native_cond_signal(&last->nt->cond.readyq);
         return TRUE;
     }
-
-    return FALSE;
+    else {
+        return FALSE;
+    }
 }
 
 /*
@@ -245,7 +247,6 @@ do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     rb_vm_t *vm = GET_VM();
     static rb_hrtime_t abs;
-    native_thread_data_t *nd = &th->native_thread_data;
 
     sched->timer = th;
 
@@ -253,9 +254,9 @@ do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
     ubf_timer_disarm();
 
     if (sched->timer_err == ETIMEDOUT) {
-        abs = native_cond_timeout(&nd->cond.readyq, TIME_QUANTUM_NSEC);
+        abs = native_cond_timeout(&th->nt->cond.readyq, TIME_QUANTUM_NSEC);
     }
-    sched->timer_err = native_cond_timedwait(&nd->cond.readyq, &sched->lock, &abs);
+    sched->timer_err = native_cond_timedwait(&th->nt->cond.readyq, &sched->lock, &abs);
 
     ubf_wakeup_all_threads();
     ruby_sigchld_handler(vm);
@@ -282,22 +283,20 @@ do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
 }
 
 static void
-thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, native_thread_data_t *nd)
+thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    ccan_list_add_tail(&sched->readyq, &nd->node.readyq);
+    ccan_list_add_tail(&sched->readyq, &th->sched.node.readyq);
 }
 
 static void
 thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     if (sched->running) {
-        native_thread_data_t *nd = &th->native_thread_data;
-
         VM_ASSERT(th->unblock.func == 0 &&
-                  "we must not be in ubf_list and GVL readyq at the same time");
+                 "we must not be in ubf_list and GVL readyq at the same time");
 
         // waiting -> ready
-        thread_sched_to_ready_common(sched, th, nd);
+        thread_sched_to_ready_common(sched, th);
 
         // wait for running chance
         do {
@@ -305,11 +304,11 @@ thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
             do_gvl_timer(sched, th);
         }
         else {
-            rb_native_cond_wait(&nd->cond.readyq, &sched->lock);
+            rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock);
         }
     } while (sched->running);
 
-    ccan_list_del_init(&nd->node.readyq);
+    ccan_list_del_init(&th->sched.node.readyq);
 
     if (sched->need_yield) {
         sched->need_yield = 0;
@@ -338,13 +337,13 @@ thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
     rb_native_mutex_unlock(&sched->lock);
 }
 
-static const native_thread_data_t *
+static rb_thread_t *
 thread_sched_to_waiting_common(struct rb_thread_sched *sched)
 {
-    native_thread_data_t *next;
+    rb_thread_t *next;
     sched->running = NULL;
-    next = ccan_list_top(&sched->readyq, native_thread_data_t, node.readyq);
-    if (next) rb_native_cond_signal(&next->cond.readyq);
+    next = ccan_list_top(&sched->readyq, rb_thread_t, sched.node.readyq);
+    if (next) rb_native_cond_signal(&next->nt->cond.readyq);
 
     return next;
 }
@@ -360,7 +359,7 @@ thread_sched_to_waiting(struct rb_thread_sched *sched)
 static void
 thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    const native_thread_data_t *next;
+    rb_thread_t *next;
 
     /*
      * Perhaps other threads are stuck in blocking region w/o GVL, too,
@@ -646,6 +645,14 @@ ruby_thread_from_native(void)
 int
 ruby_thread_set_native(rb_thread_t *th)
 {
+    if (th) {
+#ifdef USE_UBF_LIST
+        ccan_list_node_init(&th->sched.node.ubf);
+#endif
+    }
+
+    // setup TLS
+
     if (th && th->ec) {
         rb_ractor_set_current_ec(th->ractor, th->ec);
     }
@@ -657,10 +664,31 @@ ruby_thread_set_native(rb_thread_t *th)
 #endif
 }
 
-static void native_thread_init(rb_thread_t *th);
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+static int
+get_native_thread_id(void)
+{
+#ifdef __linux__
+    return (int)syscall(SYS_gettid);
+#elif defined(__FreeBSD__)
+    return pthread_getthreadid_np();
+#endif
+}
+#endif
+
+static void
+native_thread_init(struct rb_native_thread *nt)
+{
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+    nt->tid = get_native_thread_id();
+#endif
+    rb_native_cond_initialize(&nt->cond.readyq);
+    if (&nt->cond.readyq != &nt->cond.intr)
+        rb_native_cond_initialize(&nt->cond.intr);
+}
 
 void
-Init_native_thread(rb_thread_t *th)
+Init_native_thread(rb_thread_t *main_th)
 {
 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
     if (condattr_monotonic) {
@@ -680,39 +708,13 @@ Init_native_thread(rb_thread_t *main_th)
         rb_bug("pthread_key_create failed (ruby_current_ec_key)");
     }
 #endif
-    th->thread_id = pthread_self();
-    ruby_thread_set_native(th);
-    fill_thread_id_str(th);
-    native_thread_init(th);
     posix_signal(SIGVTALRM, null_func);
-}
 
-#ifdef RB_THREAD_T_HAS_NATIVE_ID
-static int
-get_native_thread_id(void)
-{
-#ifdef __linux__
-    return (int)syscall(SYS_gettid);
-#elif defined(__FreeBSD__)
-    return pthread_getthreadid_np();
-#endif
-}
-#endif
-
-static void
-native_thread_init(rb_thread_t *th)
-{
-    native_thread_data_t *nd = &th->native_thread_data;
-
-#ifdef RB_THREAD_T_HAS_NATIVE_ID
-    th->tid = get_native_thread_id();
-#endif
-#ifdef USE_UBF_LIST
-    ccan_list_node_init(&nd->node.ubf);
-#endif
-    rb_native_cond_initialize(&nd->cond.readyq);
-    if (&nd->cond.readyq != &nd->cond.intr)
-        rb_native_cond_initialize(&nd->cond.intr);
+
+    // setup main thread
+    main_th->nt->thread_id = pthread_self();
+    ruby_thread_set_native(main_th);
+    fill_thread_id_str(main_th);
+    native_thread_init(main_th->nt);
 }
 
 #ifndef USE_THREAD_CACHE
@@ -722,11 +724,12 @@ native_thread_init(rb_thread_t *th)
 static void
 native_thread_destroy(rb_thread_t *th)
 {
-    native_thread_data_t *nd = &th->native_thread_data;
+    struct rb_native_thread *nt = th->nt;
 
-    rb_native_cond_destroy(&nd->cond.readyq);
-    if (&nd->cond.readyq != &nd->cond.intr)
-        rb_native_cond_destroy(&nd->cond.intr);
+    rb_native_cond_destroy(&nt->cond.readyq);
+
+    if (&nt->cond.readyq != &nt->cond.intr)
+        rb_native_cond_destroy(&nt->cond.intr);
 
     /*
      * prevent false positive from ruby_thread_has_gvl_p if that
@@ -1066,8 +1069,10 @@ thread_start_func_1(void *th_ptr)
 #if defined USE_NATIVE_THREAD_INIT
         native_thread_init_stack(th);
 #endif
-        native_thread_init(th);
-        /* run */
+
+        native_thread_init(th->nt);
+
+        /* run */
 #if defined USE_NATIVE_THREAD_INIT
         thread_start_func_2(th, th->ec->machine.stack_start);
 #else
@@ -1162,8 +1167,8 @@ use_cached_thread(rb_thread_t *th)
     entry = ccan_list_pop(&cached_thread_head, struct cached_thread_entry, node);
     if (entry) {
         entry->th = th;
-        /* th->thread_id must be set before signal for Thread#name= */
-        th->thread_id = entry->thread_id;
+        /* th->nt->thread_id must be set before signal for Thread#name= */
+        th->nt->thread_id = entry->thread_id;
         fill_thread_id_str(th);
         rb_native_cond_signal(&entry->cond);
     }
@@ -1197,6 +1202,9 @@ native_thread_create(rb_thread_t *th)
 {
     int err = 0;
 
+    VM_ASSERT(th->nt == 0);
+    th->nt = ZALLOC(struct rb_native_thread);
+
     if (use_cached_thread(th)) {
         thread_debug("create (use cached thread): %p\n", (void *)th);
     }
@@ -1222,7 +1230,7 @@ native_thread_create(rb_thread_t *th)
 # endif
         CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
 
-        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
+        err = pthread_create(&th->nt->thread_id, &attr, thread_start_func_1, th);
         thread_debug("create: %p (%d)\n", (void *)th, err);
         /* should be done in the created thread */
         fill_thread_id_str(th);
@@ -1241,7 +1249,7 @@ native_thread_apply_priority(rb_thread_t *th)
     int policy;
     int priority = 0 - th->priority;
     int max, min;
-    pthread_getschedparam(th->thread_id, &policy, &sp);
+    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
     max = sched_get_priority_max(policy);
     min = sched_get_priority_min(policy);
 
@@ -1253,7 +1261,7 @@ native_thread_apply_priority(rb_thread_t *th)
     }
 
     sp.sched_priority = priority;
-    pthread_setschedparam(th->thread_id, policy, &sp);
+    pthread_setschedparam(th->nt->thread_id, policy, &sp);
 #else
     /* not touched */
 #endif
@@ -1272,14 +1280,14 @@ ubf_pthread_cond_signal(void *ptr)
 {
     rb_thread_t *th = (rb_thread_t *)ptr;
     thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
-    rb_native_cond_signal(&th->native_thread_data.cond.intr);
+    rb_native_cond_signal(&th->nt->cond.intr);
 }
 
 static void
 native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
     rb_nativethread_lock_t *lock = &th->interrupt_lock;
-    rb_nativethread_cond_t *cond = &th->native_thread_data.cond.intr;
+    rb_nativethread_cond_t *cond = &th->nt->cond.intr;
 
     /* Solaris cond_timedwait() return EINVAL if an argument is greater than
      * current_time + 100,000,000.  So cut up to 100,000,000.  This is
@@ -1340,7 +1348,7 @@ ubf_list_atfork(void)
 static void
 register_ubf_list(rb_thread_t *th)
 {
-    struct ccan_list_node *node = &th->native_thread_data.node.ubf;
+    struct ccan_list_node *node = &th->sched.node.ubf;
 
     if (ccan_list_empty((struct ccan_list_head*)node)) {
         rb_native_mutex_lock(&ubf_list_lock);
@@ -1353,7 +1361,7 @@ register_ubf_list(rb_thread_t *th)
 static void
 unregister_ubf_list(rb_thread_t *th)
 {
-    struct ccan_list_node *node = &th->native_thread_data.node.ubf;
+    struct ccan_list_node *node = &th->sched.node.ubf;
 
     /* we can't allow re-entry into ubf_list_head */
     VM_ASSERT(th->unblock.func == 0);
@@ -1376,7 +1384,7 @@ static void
 ubf_wakeup_thread(rb_thread_t *th)
 {
     thread_debug("thread_wait_queue_wakeup (%"PRI_THREAD_ID")\n", thread_id_str(th));
-    pthread_kill(th->thread_id, SIGVTALRM);
+    pthread_kill(th->nt->thread_id, SIGVTALRM);
 }
 
 static void
@@ -1424,13 +1432,11 @@ ubf_threads_empty(void)
 static void
 ubf_wakeup_all_threads(void)
 {
-    rb_thread_t *th;
-    native_thread_data_t *dat;
-
     if (!ubf_threads_empty()) {
         rb_native_mutex_lock(&ubf_list_lock);
-        ccan_list_for_each(&ubf_list_head, dat, node.ubf) {
-            th = ccan_container_of(dat, rb_thread_t, native_thread_data);
+        rb_thread_t *th;
+
+        ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
             ubf_wakeup_thread(th);
         }
         rb_native_mutex_unlock(&ubf_list_lock);
@@ -1755,12 +1761,12 @@ static VALUE
 native_thread_native_thread_id(rb_thread_t *target_th)
 {
 #ifdef RB_THREAD_T_HAS_NATIVE_ID
-    int tid = target_th->tid;
+    int tid = target_th->nt->tid;
     if (tid == 0) return Qnil;
     return INT2FIX(tid);
 #elif defined(__APPLE__)
     uint64_t tid;
-    int e = pthread_threadid_np(target_th->thread_id, &tid);
+    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
     if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
     return ULL2NUM((unsigned long long)tid);
 #endif
@@ -1970,7 +1976,7 @@ ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
 #ifdef STACKADDR_AVAILABLE
     if (get_stack(&base, &size) == 0) {
 # ifdef __APPLE__
-        if (pthread_equal(th->thread_id, native_main_thread.id)) {
+        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
             struct rlimit rlim;
             if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                 size = (size_t)rlim.rlim_cur;
@@ -2189,7 +2195,7 @@ ubf_ppoll_sleep(void *ignore)
  * [ruby-core:90417] [Bug #15398]
  */
 #define THREAD_BLOCKING_YIELD(th) do { \
-    const native_thread_data_t *next; \
+    const rb_thread_t *next; \
     struct rb_thread_sched *sched = TH_SCHED(th); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
     rb_native_mutex_lock(&sched->lock); \
diff --git a/thread_pthread.h b/thread_pthread.h
index f65916fea9..cc1675b77c 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -17,11 +17,30 @@
 #define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
 #define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER
 
-typedef struct native_thread_data_struct {
+// per-Thread scheduler helper data
+struct rb_thread_sched_item {
     union {
         struct ccan_list_node ubf;
         struct ccan_list_node readyq; // protected by sched->lock
     } node;
+};
+
+struct rb_native_thread {
+    int id;
+
+    rb_nativethread_id_t thread_id;
+
+#ifdef NON_SCALAR_THREAD_ID
+    rb_thread_id_string_t thread_id_string;
+#endif
+
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+    int tid;
+#endif
+
+    struct rb_thread_struct *running_thread;
+
+    // to control native thread
 #if defined(__GLIBC__) || defined(__FreeBSD__)
     union
 #else
@@ -31,11 +50,11 @@ typedef struct native_thread_data_struct {
      */
     struct
 #endif
-  {
+    {
         rb_nativethread_cond_t intr; /* th->interrupt_lock */
         rb_nativethread_cond_t readyq; /* use sched->lock */
     } cond;
-} native_thread_data_t;
+};
 
 #undef except
 #undef try
diff --git a/thread_win32.c b/thread_win32.c
index d8544af3a3..966f0af5b6 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -155,7 +155,7 @@ ruby_thread_set_native(rb_thread_t *th)
 }
 
 void
-Init_native_thread(rb_thread_t *th)
+Init_native_thread(rb_thread_t *main_th)
 {
     if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
         rb_bug("TlsAlloc() for ruby_current_ec_key fails");
@@ -163,17 +163,21 @@ Init_native_thread(rb_thread_t *th)
     if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
         rb_bug("TlsAlloc() for ruby_native_thread_key fails");
     }
-    ruby_thread_set_native(th);
+
+    // setup main thread
+
+    ruby_thread_set_native(main_th);
+    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+
     DuplicateHandle(GetCurrentProcess(),
                     GetCurrentThread(),
                     GetCurrentProcess(),
-                    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
-
-    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
 
     thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
-                 th, GET_THREAD()->thread_id,
-                 th->native_thread_data.interrupt_event);
+                 main_th,
+                 main_th->nt->thread_id,
+                 main_th->nt->interrupt_event);
 }
 
 static int
@@ -186,7 +190,7 @@ w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
     thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                  events, count, timeout, th);
 
-    if (th && (intr = th->native_thread_data.interrupt_event)) {
+    if (th && (intr = th->nt->interrupt_event)) {
         if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
             targets = ALLOCA_N(HANDLE, count + 1);
             memcpy(targets, events, sizeof(HANDLE) * count);
@@ -194,7 +198,7 @@ w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
             targets[count++] = intr;
             thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
         }
-        else if (intr == th->native_thread_data.interrupt_event) {
+        else if (intr == th->nt->interrupt_event) {
             w32_error("w32_wait_events");
         }
     }
@@ -592,8 +596,8 @@ native_thread_init_stack(rb_thread_t *th)
 static void
 native_thread_destroy(rb_thread_t *th)
 {
-    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
-    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
+    HANDLE intr = InterlockedExchangePointer(&th->nt->interrupt_event, 0);
+    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->nt->thread_id);
     w32_close_handle(intr);
 }
 
@@ -601,14 +605,14 @@ static unsigned long __stdcall
 thread_start_func_1(void *th_ptr)
 {
     rb_thread_t *th = th_ptr;
-    volatile HANDLE thread_id = th->thread_id;
+    volatile HANDLE thread_id = th->nt->thread_id;
 
     native_thread_init_stack(th);
-    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
 
     /* run */
     thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
-                 th->thread_id, th->native_thread_data.interrupt_event);
+                 th->nt->thread_id, th->nt->interrupt_event);
 
     thread_start_func_2(th, th->ec->machine.stack_start);
 
@@ -621,19 +625,20 @@ static int
 native_thread_create(rb_thread_t *th)
 {
     const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
-    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
+    th->nt = ZALLOC(struct rb_native_thread);
+    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
 
-    if ((th->thread_id) == 0) {
+    if ((th->nt->thread_id) == 0) {
         return thread_errno;
     }
 
-    w32_resume_thread(th->thread_id);
+    w32_resume_thread(th->nt->thread_id);
 
     if (THREAD_DEBUG) {
         Sleep(0);
         thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIuSIZE"\n",
-                     th, th->thread_id,
-                     th->native_thread_data.interrupt_event, stack_size);
+                     th, th->nt->thread_id,
+                     th->nt->interrupt_event, stack_size);
     }
     return 0;
 }
@@ -660,7 +665,7 @@ native_thread_apply_priority(rb_thread_t *th)
         priority = THREAD_PRIORITY_NORMAL;
     }
 
-    SetThreadPriority(th->thread_id, priority);
+    SetThreadPriority(th->nt->thread_id, priority);
 }
 
 #endif /* USE_NATIVE_THREAD_PRIORITY */
@@ -699,7 +704,7 @@ ubf_handle(void *ptr)
     rb_thread_t *th = (rb_thread_t *)ptr;
     thread_debug("ubf_handle: %p\n", th);
 
-    if (!SetEvent(th->native_thread_data.interrupt_event)) {
+    if (!SetEvent(th->nt->interrupt_event)) {
         w32_error("ubf_handle");
     }
 }
@@ -848,7 +853,7 @@ native_set_thread_name(rb_thread_t *th)
 static VALUE
 native_thread_native_thread_id(rb_thread_t *th)
 {
-    DWORD tid = GetThreadId(th->thread_id);
+    DWORD tid = GetThreadId(th->nt->thread_id);
     if (tid == 0) rb_sys_fail("GetThreadId");
     return ULONG2NUM(tid);
 }
diff --git a/thread_win32.h b/thread_win32.h
index 95cbe7c984..12aef02728 100644
--- a/thread_win32.h
+++ b/thread_win32.h
@@ -26,9 +26,14 @@ struct rb_thread_cond_struct {
     struct cond_event_entry *prev;
 };
 
-typedef struct native_thread_data_struct {
+struct rb_native_thread {
+    HANDLE thread_id;
     HANDLE interrupt_event;
-} native_thread_data_t;
+};
+
+struct rb_thread_sched_item {
+    char dmy;
+};
 
 struct rb_thread_sched {
     HANDLE lock;
diff --git a/vm.c b/vm.c
index 8cec781f7a..8a1e01b2a3 100644
--- a/vm.c
+++ b/vm.c
@@ -3165,7 +3165,8 @@ thread_free(void *ptr)
         RUBY_GC_INFO("MRI main thread\n");
     }
     else {
-        ruby_xfree(ptr);
+        ruby_xfree(th->nt); // TODO
+        ruby_xfree(th);
     }
 
     RUBY_FREE_LEAVE("thread");
@@ -3207,11 +3208,8 @@ rb_obj_is_thread(VALUE obj)
 static VALUE
 thread_alloc(VALUE klass)
 {
-    VALUE obj;
     rb_thread_t *th;
-    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
-
-    return obj;
+    return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
 }
 
 inline void
@@ -3275,8 +3273,8 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm, rb_ractor_t *r)
     th->top_self = vm->top_self; // 0 while self == 0
     th->value = Qundef;
 
-#ifdef NON_SCALAR_THREAD_ID
-    th->thread_id_string[0] = '\0';
+#if defined(NON_SCALAR_THREAD_ID) && !defined(__wasm__) && !defined(__EMSCRIPTEN__)
+    th->nt->thread_id_string[0] = '\0';
 #endif
 
     th->ec->errinfo = Qnil;
@@ -3947,6 +3945,7 @@ Init_BareVM(void)
     vm->constant_cache = rb_id_table_create(0);
 
     // setup main thread
+    th->nt = ZALLOC(struct rb_native_thread);
     Init_native_thread(th);
     th_init(th, 0, vm, vm->ractor.main_ractor = rb_ractor_main_alloc());
diff --git a/vm_core.h b/vm_core.h
index 5e3f0bc002..9a5269c6bc 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -68,6 +68,10 @@
 # include
 #endif
 
+#if defined(__linux__) || defined(__FreeBSD__)
+# define RB_THREAD_T_HAS_NATIVE_ID
+#endif
+
 #include "ruby/internal/stdbool.h"
 #include "ccan/list/list.h"
 #include "id.h"
@@ -969,18 +973,18 @@ struct rb_ext_config {
 
 typedef struct rb_ractor_struct rb_ractor_t;
 
-#if defined(__linux__) || defined(__FreeBSD__)
-# define RB_THREAD_T_HAS_NATIVE_ID
-#endif
+struct rb_native_thread;
 
 typedef struct rb_thread_struct {
     struct ccan_list_node lt_node; // managed by a ractor
     VALUE self;
     rb_ractor_t *ractor;
    rb_vm_t *vm;
-
+    struct rb_native_thread *nt;
     rb_execution_context_t *ec;
 
+    struct rb_thread_sched_item sched;
+
     VALUE last_status; /* $? */
 
     /* for cfunc */
@@ -991,15 +995,10 @@ typedef struct rb_thread_struct {
     VALUE top_wrapper;
 
     /* thread control */
-    rb_nativethread_id_t thread_id;
-#ifdef NON_SCALAR_THREAD_ID
-    rb_thread_id_string_t thread_id_string;
-#endif
-#ifdef RB_THREAD_T_HAS_NATIVE_ID
-    int tid;
-#endif
+
     BITFIELD(enum rb_thread_status, status, 2);
     /* bit flags */
+    unsigned int locking_native_thread : 1;
     unsigned int to_kill : 1;
     unsigned int abort_on_exception: 1;
     unsigned int report_on_exception: 1;
@@ -1007,7 +1006,6 @@ typedef struct rb_thread_struct {
     int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
     uint32_t running_time_us; /* 12500..800000 */
 
-    native_thread_data_t native_thread_data;
     void *blocking_region_buffer;
 
     VALUE thgroup;
diff --git a/vm_dump.c b/vm_dump.c
index e4410e3e7c..ebc9a03680 100644
--- a/vm_dump.c
+++ b/vm_dump.c
@@ -1194,10 +1194,10 @@ rb_vmdebug_stack_dump_all_threads(void)
         ccan_list_for_each(&r->threads.set, th, lt_node) {
 #ifdef NON_SCALAR_THREAD_ID
             rb_thread_id_string_t buf;
-            ruby_fill_thread_id_string(th->thread_id, buf);
+            ruby_fill_thread_id_string(th->nt->thread_id, buf);
             fprintf(stderr, "th: %p, native_id: %s\n", th, buf);
 #else
-            fprintf(stderr, "th: %p, native_id: %p\n", (void *)th, (void *)(uintptr_t)th->thread_id);
+            fprintf(stderr, "th: %p, native_id: %p\n", (void *)th, (void *)(uintptr_t)th->nt->thread_id);
 #endif
             rb_vmdebug_stack_dump_raw(th->ec, th->ec->cfp);
         }