From c1432a481615c7f1ad2c1426f0617f73599e2493 Mon Sep 17 00:00:00 2001
From: Nobuyoshi Nakada
Date: Thu, 29 Jun 2023 00:08:36 +0900
Subject: [PATCH] Compile disabled code for thread cache always

---
 thread.c         |  3 +++
 thread_none.c    |  2 --
 thread_pthread.c | 39 ++++++++++++++++-----------------------
 thread_win32.c   |  2 --
 4 files changed, 19 insertions(+), 27 deletions(-)

diff --git a/thread.c b/thread.c
index b48b1ac9cb..820f2b354c 100644
--- a/thread.c
+++ b/thread.c
@@ -5437,6 +5437,9 @@ Init_Thread(void)
     rb_thread_create_timer_thread();
 
     Init_thread_sync();
+
+    // TODO: Suppress unused function warning for now
+    if (0) rb_thread_sched_destroy(NULL);
 }
 
 int
diff --git a/thread_none.c b/thread_none.c
index 1e151cccb2..27c6ddf754 100644
--- a/thread_none.c
+++ b/thread_none.c
@@ -46,12 +46,10 @@ rb_thread_sched_init(struct rb_thread_sched *sched)
 {
 }
 
-#if 0
 static void
 rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
 }
-#endif
 
 // Do nothing for mutex guard
 void
diff --git a/thread_pthread.c b/thread_pthread.c
index 1f8bf7a1d5..ff7831600c 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -617,7 +617,6 @@ rb_thread_sched_init(struct rb_thread_sched *sched)
     sched->wait_yield = 0;
 }
 
-#if 0 // TODO
 static void
 clear_thread_cache_altstack(void);
 
@@ -637,7 +636,6 @@ rb_thread_sched_destroy(struct rb_thread_sched *sched)
     }
     clear_thread_cache_altstack();
 }
-#endif
 
 #if defined(HAVE_WORKING_FORK)
 static void thread_cache_reset(void);
@@ -747,8 +745,12 @@ Init_native_thread(rb_thread_t *main_th)
     native_thread_init(main_th->nt);
 }
 
-#ifndef USE_THREAD_CACHE
-#define USE_THREAD_CACHE 1
+#if defined(USE_THREAD_CACHE) && !(USE_THREAD_CACHE+0)
+# undef USE_THREAD_CACHE
+# define USE_THREAD_CACHE 0
+#else
+# undef USE_THREAD_CACHE
+# define USE_THREAD_CACHE 1
 #endif
 
 static void
@@ -1092,10 +1094,7 @@ thread_start_func_1(void *th_ptr)
 #endif
     RB_ALTSTACK_INIT(void *altstack, th->nt->altstack);
 
-#if USE_THREAD_CACHE
-  thread_start:
-#endif
-    {
+    do {
 #if !defined USE_NATIVE_THREAD_INIT
         VALUE stack_start;
 #endif
@@ -1114,15 +1113,12 @@ thread_start_func_1(void *th_ptr)
 #else
         thread_start_func_2(th, &stack_start);
 #endif
+    } while (USE_THREAD_CACHE &&
+             /* cache thread */
+             (th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0);
+    if (!USE_THREAD_CACHE) {
+        RB_ALTSTACK_FREE(altstack);
     }
-#if USE_THREAD_CACHE
-    /* cache thread */
-    if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {
-        goto thread_start;
-    }
-#else
-    RB_ALTSTACK_FREE(altstack);
-#endif
 
     return 0;
 }
@@ -1196,7 +1192,8 @@ static void thread_cache_reset(void) { }
 static int
 use_cached_thread(rb_thread_t *th)
 {
-#if USE_THREAD_CACHE
+    if (!USE_THREAD_CACHE) return 0;
+
     struct cached_thread_entry *entry;
 
     rb_native_mutex_lock(&thread_cache_lock);
@@ -1209,16 +1206,14 @@ use_cached_thread(rb_thread_t *th)
     }
     rb_native_mutex_unlock(&thread_cache_lock);
     return !!entry;
-#endif
-    return 0;
 }
 
 
-#if 0 // TODO
 static void
 clear_thread_cache_altstack(void)
 {
-#if USE_THREAD_CACHE
+    if (!USE_THREAD_CACHE) return;
+
     struct cached_thread_entry *entry;
 
     rb_native_mutex_lock(&thread_cache_lock);
@@ -1228,9 +1223,7 @@ clear_thread_cache_altstack(void)
         RB_ALTSTACK_FREE(altstack);
     }
     rb_native_mutex_unlock(&thread_cache_lock);
-#endif
 }
-#endif
 
 static struct rb_native_thread *
 native_thread_alloc(void)
diff --git a/thread_win32.c b/thread_win32.c
index b1aab910ef..543a045bd8 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -154,14 +154,12 @@ rb_thread_sched_init(struct rb_thread_sched *sched)
     sched->lock = w32_mutex_create();
 }
 
-#if 0
 void
 rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
     if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
     CloseHandle(sched->lock);
 }
-#endif
 
 rb_thread_t *
 ruby_thread_from_native(void)