`struct rb_thread_sched_waiting`

Introduce `struct rb_thread_sched_waiting` so that `timer_th.waiting` can contain entries other than `rb_thread_t`.
Koichi Sasada 2024-04-28 07:19:09 +09:00
Parent a9f6bd028a
Commit ffc69eec0a
3 changed files with 70 additions and 51 deletions
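For context, the new `thread_sched_waiting_thread()` helper below recovers the owning `rb_thread_t` from a pointer to its embedded `sched.waiting_reason` member via `offsetof()` (the container_of pattern). A minimal standalone sketch of that pattern, with illustrative names (`outer`, `inner`, `outer_of`) that are not part of the Ruby sources:

#include <stddef.h>
#include <stdio.h>

struct inner { int payload; };

struct outer {
    int id;
    struct inner member; // embedded member, like sched.waiting_reason inside rb_thread_t
};

// Recover the enclosing struct from a pointer to its embedded member,
// mirroring what thread_sched_waiting_thread() does with offsetof().
static struct outer *
outer_of(struct inner *p)
{
    return p ? (struct outer *)((char *)p - offsetof(struct outer, member)) : NULL;
}

int
main(void)
{
    struct outer o = { .id = 42 };
    printf("%d\n", outer_of(&o.member)->id); // prints 42
    return 0;
}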

View file

@@ -2873,6 +2873,17 @@ static void timer_thread_wakeup_thread(rb_thread_t *th);
#include "thread_pthread_mn.c"
static rb_thread_t *
thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
{
if (w) {
return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
}
else {
return NULL;
}
}
static int
timer_thread_set_timeout(rb_vm_t *vm)
{
@@ -2905,7 +2916,9 @@ timer_thread_set_timeout(rb_vm_t *vm)
if (vm->ractor.sched.timeslice_wait_inf) {
rb_native_mutex_lock(&timer_th.waiting_lock);
{
rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
rb_thread_t *th = thread_sched_waiting_thread(w);
if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
rb_hrtime_t now = rb_hrtime_now();
rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
@@ -2955,22 +2968,22 @@ timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{
rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
if (th != NULL &&
(th->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) {
if (w != NULL &&
(w->flags & thread_sched_waiting_timeout) &&
timer_thread_check_exceed(w->data.timeout, now)) {
RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
// delete from waiting list
ccan_list_del_init(&th->sched.waiting_reason.node);
ccan_list_del_init(&w->node);
// setup result
th->sched.waiting_reason.flags = thread_sched_waiting_none;
th->sched.waiting_reason.data.result = 0;
w->flags = thread_sched_waiting_none;
w->data.result = 0;
return th;
return thread_sched_waiting_thread(w);
}
return NULL;
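Both `timer_thread_set_timeout()` and `timer_thread_deq_wakeup()` above assume that `timer_th.waiting` stays sorted by absolute timeout, with timeout-less entries at the tail, so `ccan_list_top()` always yields the nearest deadline. A rough standalone sketch of that ordering rule over a plain singly linked list, where a deadline of 0 means no timeout (the names `wait_entry` and `insert_sorted` are illustrative, not from the Ruby sources):

#include <stdint.h>
#include <stdio.h>

// Illustrative waiting entry: deadline 0 means "no timeout" and sorts last,
// so the head of the list is always the nearest deadline (compare the
// "[1, 2, 3, ..., 0, 0, 0]" comment in verify_waiting_list() further down).
struct wait_entry {
    uint64_t deadline;
    struct wait_entry *next;
};

static void
insert_sorted(struct wait_entry **head, struct wait_entry *e)
{
    struct wait_entry **pp = head;

    if (e->deadline != 0) {
        // skip timed entries that expire earlier than the new one
        while (*pp && (*pp)->deadline != 0 && (*pp)->deadline < e->deadline) {
            pp = &(*pp)->next;
        }
    }
    else {
        // no timeout: append behind every existing entry
        while (*pp) pp = &(*pp)->next;
    }
    e->next = *pp;
    *pp = e;
}

int
main(void)
{
    struct wait_entry e1 = {3, NULL}, e2 = {0, NULL}, e3 = {1, NULL};
    struct wait_entry *head = NULL;

    insert_sorted(&head, &e1);
    insert_sorted(&head, &e2);
    insert_sorted(&head, &e3);

    for (struct wait_entry *p = head; p; p = p->next) {
        printf("%llu ", (unsigned long long)p->deadline); // prints: 1 3 0
    }
    printf("\n");
    return 0;
}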

View file

@@ -17,6 +17,31 @@
#define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
#define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER
// this data should be protected by timer_th.waiting_lock
struct rb_thread_sched_waiting {
enum thread_sched_waiting_flag {
thread_sched_waiting_none = 0x00,
thread_sched_waiting_timeout = 0x01,
thread_sched_waiting_io_read = 0x02,
thread_sched_waiting_io_write = 0x08,
thread_sched_waiting_io_force = 0x40, // ignore readable
} flags;
struct {
// should be compat with hrtime.h
#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
int128_t timeout;
#else
uint64_t timeout;
#endif
int fd; // -1 for timeout only
int result;
} data;
// connected to timer_th.waiting
struct ccan_list_node node;
};
// per-Thread scheduler helper data
struct rb_thread_sched_item {
struct {
@@ -38,30 +63,7 @@ struct rb_thread_sched_item {
struct ccan_list_node zombie_threads;
} node;
// this data should be protected by timer_th.waiting_lock
struct {
enum thread_sched_waiting_flag {
thread_sched_waiting_none = 0x00,
thread_sched_waiting_timeout = 0x01,
thread_sched_waiting_io_read = 0x02,
thread_sched_waiting_io_write = 0x08,
thread_sched_waiting_io_force = 0x40, // ignore readable
} flags;
struct {
// should be compat with hrtime.h
#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
int128_t timeout;
#else
uint64_t timeout;
#endif
int fd; // -1 for timeout only
int result;
} data;
// connected to timer_th.waiting
struct ccan_list_node node;
} waiting_reason;
struct rb_thread_sched_waiting waiting_reason;
bool finished;
bool malloc_stack;

View file

@@ -546,15 +546,18 @@ static void
verify_waiting_list(void)
{
#if VM_CHECK_MODE > 0
rb_thread_t *wth, *prev_wth = NULL;
ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
struct rb_thread_sched_waiting *w, *prev_w = NULL;
// waiting list's timeout order should be [1, 2, 3, ..., 0, 0, 0]
ccan_list_for_each(&timer_th.waiting, w, node) {
// fprintf(stderr, "verify_waiting_list th:%u abs:%lu\n", rb_th_serial(wth), (unsigned long)wth->sched.waiting_reason.data.timeout);
if (prev_wth) {
rb_hrtime_t timeout = wth->sched.waiting_reason.data.timeout;
rb_hrtime_t prev_timeout = prev_wth->sched.waiting_reason.data.timeout;
if (prev_w) {
rb_hrtime_t timeout = w->data.timeout;
rb_hrtime_t prev_timeout = prev_w->data.timeout;
VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
}
prev_wth = wth;
prev_w = w;
}
#endif
}
@@ -632,16 +635,17 @@ kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
static bool
kqueue_already_registered(int fd)
{
rb_thread_t *wth, *found_wth = NULL;
ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
struct rb_thread_sched_waiting *w, *found_w = NULL;
ccan_list_for_each(&timer_th.waiting, w, node) {
// Similar to EEXIST in epoll_ctl, but more strict because it checks fd rather than flags
// for simplicity
if (wth->sched.waiting_reason.flags && wth->sched.waiting_reason.data.fd == fd) {
found_wth = wth;
if (w->flags && w->data.fd == fd) {
found_w = w;
break;
}
}
return found_wth != NULL;
return found_w != NULL;
}
#endif // HAVE_SYS_EVENT_H
@@ -786,20 +790,20 @@ timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting
VM_ASSERT(flags & thread_sched_waiting_timeout);
// insert th to sorted list (TODO: O(n))
rb_thread_t *wth, *prev_wth = NULL;
struct rb_thread_sched_waiting *w, *prev_w = NULL;
ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
if ((wth->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
wth->sched.waiting_reason.data.timeout < abs) {
prev_wth = wth;
ccan_list_for_each(&timer_th.waiting, w, node) {
if ((w->flags & thread_sched_waiting_timeout) &&
w->data.timeout < abs) {
prev_w = w;
}
else {
break;
}
}
if (prev_wth) {
ccan_list_add_after(&timer_th.waiting, &prev_wth->sched.waiting_reason.node, &th->sched.waiting_reason.node);
if (prev_w) {
ccan_list_add_after(&timer_th.waiting, &prev_w->node, &th->sched.waiting_reason.node);
}
else {
ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);