revert r65471 and include Eric's patch as well

https://bugs.ruby-lang.org/issues/14867#note-112

I want to touch similar places soon, so let me merge Eric's patch first to avoid conflicts between our changes.
Let's watch the trunk-mjit / trunk-mjit-wait CIs.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65473 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
k0kubun 2018-10-31 12:21:55 +00:00
Parent: 5bc93ec743
Commit: ff5dc2cbbf
2 changed files with 73 additions and 130 deletions
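The substance of the patch is a switch from the hand-rolled rb_mjit_unit_node list to ccan/list, the intrusive doubly linked list already vendored in Ruby's tree under ccan/list. For readers unfamiliar with that API, here is a minimal, self-contained sketch of the idiom the diff below relies on; every name in it (struct item, entries, value) is illustrative and not taken from the patch.

    /* Sketch only: assumes ccan/list/list.h is reachable on the include path
     * (in Ruby's tree it is "ccan/list/list.h"). All names below are made up. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "ccan/list/list.h"

    struct item {
        int value;
        struct list_node node;          /* embedded link, like rb_mjit_unit's unode */
    };

    /* Statically initialized empty list, like unit_queue/active_units below. */
    static struct list_head entries = LIST_HEAD_INIT(entries);

    int main(void)
    {
        struct item *it, *next;
        int i;

        for (i = 0; i < 3; i++) {
            struct item *x = malloc(sizeof(*x));
            x->value = i;
            list_add_tail(&entries, &x->node);  /* O(1) append, no extra node */
        }

        /* The _safe variant tolerates unlinking the current element. */
        list_for_each_safe(&entries, it, next, node) {
            printf("%d\n", it->value);
            list_del(&it->node);                /* unlink first ... */
            free(it);                           /* ... then free, as the patch does */
        }
        return 0;
    }

The key property is that the link (struct list_node) lives inside the element itself, so insertion and removal are O(1) and need no separate allocation, which is exactly what lets the patch delete create_list_node().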

mjit.c (67 changed lines)

@@ -119,7 +119,7 @@ mjit_free_iseq(const rb_iseq_t *iseq)
 static void
 init_list(struct rb_mjit_unit_list *list)
 {
-    list->head = NULL;
+    list_head_init(&list->head);
     list->length = 0;
 }
@@ -129,11 +129,11 @@ init_list(struct rb_mjit_unit_list *list)
 static void
 free_list(struct rb_mjit_unit_list *list)
 {
-    struct rb_mjit_unit_node *node, *next;
-    for (node = list->head; node != NULL; node = next) {
-        next = node->next;
-        free_unit(node->unit);
-        xfree(node);
+    struct rb_mjit_unit *unit = 0, *next;
+
+    list_for_each_safe(&list->head, unit, next, unode) {
+        list_del(&unit->unode);
+        free_unit(unit);
     }
     list->length = 0;
 }
@@ -248,24 +248,23 @@ unload_units(void)
 {
     rb_vm_t *vm = GET_THREAD()->vm;
     rb_thread_t *th = NULL;
-    struct rb_mjit_unit_node *node, *next, *worst_node;
+    struct rb_mjit_unit *unit = 0, *next, *worst;
     struct mjit_cont *cont;
     int delete_num, units_num = active_units.length;
 
     /* For now, we don't unload units when ISeq is GCed. We should
        unload such ISeqs first here. */
-    for (node = active_units.head; node != NULL; node = next) {
-        next = node->next;
-        if (node->unit->iseq == NULL) { /* ISeq is GCed. */
-            free_unit(node->unit);
-            remove_from_list(node, &active_units);
+    list_for_each_safe(&active_units.head, unit, next, unode) {
+        if (unit->iseq == NULL) { /* ISeq is GCed. */
+            remove_from_list(unit, &active_units);
+            free_unit(unit);
         }
     }
 
     /* Detect units which are in use and can't be unloaded. */
-    for (node = active_units.head; node != NULL; node = node->next) {
-        assert(node->unit != NULL && node->unit->iseq != NULL && node->unit->handle != NULL);
-        node->unit->used_code_p = FALSE;
+    list_for_each_safe(&active_units.head, unit, next, unode) {
+        assert(unit->iseq != NULL && unit->handle != NULL);
+        unit->used_code_p = FALSE;
     }
     list_for_each(&vm->living_threads, th, vmlt_node) {
         mark_ec_units(th->ec);
@@ -280,23 +279,23 @@ unload_units(void)
     delete_num = active_units.length / 10;
     for (; active_units.length > mjit_opts.max_cache_size - delete_num;) {
         /* Find one unit that has the minimum total_calls. */
-        worst_node = NULL;
-        for (node = active_units.head; node != NULL; node = node->next) {
-            if (node->unit->used_code_p) /* We can't unload code on stack. */
+        worst = NULL;
+        list_for_each_safe(&active_units.head, unit, next, unode) {
+            if (unit->used_code_p) /* We can't unload code on stack. */
                 continue;
-            if (worst_node == NULL || worst_node->unit->iseq->body->total_calls > node->unit->iseq->body->total_calls) {
-                worst_node = node;
+            if (worst == NULL || worst->iseq->body->total_calls > unit->iseq->body->total_calls) {
+                worst = unit;
             }
         }
-        if (worst_node == NULL)
+        if (worst == NULL)
             break;
 
         /* Unload the worst node. */
-        verbose(2, "Unloading unit %d (calls=%lu)", worst_node->unit->id, worst_node->unit->iseq->body->total_calls);
-        assert(worst_node->unit->handle != NULL);
-        free_unit(worst_node->unit);
-        remove_from_list(worst_node, &active_units);
+        verbose(2, "Unloading unit %d (calls=%lu)", worst->id, worst->iseq->body->total_calls);
+        assert(worst->handle != NULL);
+        remove_from_list(worst, &active_units);
+        free_unit(worst);
     }
     verbose(1, "Too many JIT code -- %d units unloaded", units_num - active_units.length);
 }
@@ -306,8 +305,6 @@ unload_units(void)
 void
 mjit_add_iseq_to_process(const rb_iseq_t *iseq)
 {
-    struct rb_mjit_unit_node *node;
-
     if (!mjit_enabled || pch_status == PCH_FAILED)
         return;
@@ -317,14 +314,8 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq)
         /* Failure in creating the unit. */
         return;
 
-    node = create_list_node(iseq->body->jit_unit);
-    if (node == NULL) {
-        mjit_warning("failed to allocate a node to be added to unit_queue");
-        return;
-    }
-
     CRITICAL_SECTION_START(3, "in add_iseq_to_process");
-    add_to_list(node, &unit_queue);
+    add_to_list(iseq->body->jit_unit, &unit_queue);
     if (active_units.length >= mjit_opts.max_cache_size) {
         unload_units();
     }
@@ -762,14 +753,14 @@ mjit_finish(void)
 void
 mjit_mark(void)
 {
-    struct rb_mjit_unit_node *node;
+    struct rb_mjit_unit *unit = 0;
 
     if (!mjit_enabled)
         return;
     RUBY_MARK_ENTER("mjit");
     CRITICAL_SECTION_START(4, "mjit_mark");
-    for (node = unit_queue.head; node != NULL; node = node->next) {
-        if (node->unit->iseq) { /* ISeq is still not GCed */
-            VALUE iseq = (VALUE)node->unit->iseq;
+    list_for_each(&unit_queue.head, unit, unode) {
+        if (unit->iseq) { /* ISeq is still not GCed */
+            VALUE iseq = (VALUE)unit->iseq;
             CRITICAL_SECTION_FINISH(4, "mjit_mark rb_gc_mark");
             /* Don't wrap critical section with this. This may trigger GC,

mjit_worker.c

@@ -139,18 +139,12 @@ struct rb_mjit_unit {
 #endif
     /* Only used by unload_units. Flag to check this unit is currently on stack or not. */
     char used_code_p;
-};
-
-/* Node of linked list in struct rb_mjit_unit_list.
-   TODO: use ccan/list for this */
-struct rb_mjit_unit_node {
-    struct rb_mjit_unit *unit;
-    struct rb_mjit_unit_node *next, *prev;
+    struct list_node unode;
 };
 
 /* Linked list of struct rb_mjit_unit. */
 struct rb_mjit_unit_list {
-    struct rb_mjit_unit_node *head;
+    struct list_head head;
     int length; /* the list length */
 };
@@ -181,11 +175,11 @@ int mjit_call_p = FALSE;
 /* Priority queue of iseqs waiting for JIT compilation.
    This variable is a pointer to head unit of the queue. */
-static struct rb_mjit_unit_list unit_queue;
+static struct rb_mjit_unit_list unit_queue = { LIST_HEAD_INIT(unit_queue.head) };
 /* List of units which are successfully compiled. */
-static struct rb_mjit_unit_list active_units;
+static struct rb_mjit_unit_list active_units = { LIST_HEAD_INIT(active_units.head) };
 /* List of compacted so files which will be deleted in `mjit_finish()`. */
-static struct rb_mjit_unit_list compact_units;
+static struct rb_mjit_unit_list compact_units = { LIST_HEAD_INIT(compact_units.head) };
 /* The number of so far processed ISEQs, used to generate unique id. */
 static int current_unit_num;
 /* A mutex for conitionals and critical sections. */
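On the initializers above: LIST_HEAD_INIT(x) expands to an initializer whose two links point back at the head itself, i.e. an already-empty circular list, which is why the full member path (unit_queue.head and so on) is spelled out. A small sketch with made-up names showing the static form next to its runtime equivalent:

    /* Illustrative wrapper, standing in for struct rb_mjit_unit_list. */
    struct wrapper {
        struct list_head head;
        int length;
    };

    /* Static form, as used for unit_queue/active_units/compact_units above. */
    static struct wrapper w = { LIST_HEAD_INIT(w.head), 0 };

    /* Runtime equivalent, as init_list() in mjit.c now does. */
    static void wrapper_init(struct wrapper *wp)
    {
        list_head_init(&wp->head);
        wp->length = 0;
    }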
@@ -318,57 +312,20 @@ mjit_warning(const char *format, ...)
     }
 }
 
-/* Allocate struct rb_mjit_unit_node and return it. This MUST NOT be
-   called inside critical section because that causes deadlock. ZALLOC
-   may fire GC and GC hooks mjit_gc_start_hook that starts critical section. */
-static struct rb_mjit_unit_node *
-create_list_node(struct rb_mjit_unit *unit)
-{
-    struct rb_mjit_unit_node *node = calloc(1, sizeof(struct rb_mjit_unit_node)); /* To prevent GC, don't use ZALLOC */
-    if (node == NULL) return NULL;
-    node->unit = unit;
-    return node;
-}
-
 /* Add unit node to the tail of doubly linked LIST. It should be not in
    the list before. */
 static void
-add_to_list(struct rb_mjit_unit_node *node, struct rb_mjit_unit_list *list)
+add_to_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
 {
-    /* Append iseq to list */
-    if (list->head == NULL) {
-        list->head = node;
-    }
-    else {
-        struct rb_mjit_unit_node *tail = list->head;
-        while (tail->next != NULL) {
-            tail = tail->next;
-        }
-        tail->next = node;
-        node->prev = tail;
-    }
+    list_add_tail(&list->head, &unit->unode);
    list->length++;
 }
 
 static void
-remove_from_list(struct rb_mjit_unit_node *node, struct rb_mjit_unit_list *list)
+remove_from_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
 {
-    if (node->prev && node->next) {
-        node->prev->next = node->next;
-        node->next->prev = node->prev;
-    }
-    else if (node->prev == NULL && node->next) {
-        list->head = node->next;
-        node->next->prev = NULL;
-    }
-    else if (node->prev && node->next == NULL) {
-        node->prev->next = NULL;
-    }
-    else {
-        list->head = NULL;
-    }
+    list_del(&unit->unode);
     list->length--;
-    free(node);
 }
 
 static void
@@ -497,28 +454,26 @@ mjit_valid_class_serial_p(rb_serial_t class_serial)
 /* Return the best unit from list. The best is the first
    high priority unit or the unit whose iseq has the biggest number
    of calls so far. */
-static struct rb_mjit_unit_node *
+static struct rb_mjit_unit *
 get_from_list(struct rb_mjit_unit_list *list)
 {
-    struct rb_mjit_unit_node *node, *next, *best = NULL;
-
-    if (list->head == NULL)
-        return NULL;
+    struct rb_mjit_unit *unit = NULL, *next, *best = NULL;
 
     /* Find iseq with max total_calls */
-    for (node = list->head; node != NULL; node = next) {
-        next = node->next;
-        if (node->unit->iseq == NULL) { /* ISeq is GCed. */
-            free_unit(node->unit);
-            remove_from_list(node, list);
+    list_for_each_safe(&list->head, unit, next, unode) {
+        if (unit->iseq == NULL) { /* ISeq is GCed. */
+            remove_from_list(unit, list);
+            free_unit(unit);
             continue;
         }
 
-        if (best == NULL || best->unit->iseq->body->total_calls < node->unit->iseq->body->total_calls) {
-            best = node;
+        if (best == NULL || best->iseq->body->total_calls < unit->iseq->body->total_calls) {
+            best = unit;
         }
     }
 
+    if (best) {
+        remove_from_list(best, list);
+    }
+
     return best;
 }
@@ -881,8 +836,7 @@ static void
 compact_all_jit_code(void)
 {
 # ifndef _WIN32 /* This requires header transformation but we don't transform header on Windows for now */
-    struct rb_mjit_unit *unit;
-    struct rb_mjit_unit_node *node;
+    struct rb_mjit_unit *unit, *cur = 0;
     double start_time, end_time;
     static const char so_ext[] = DLEXT;
     char so_file[MAXPATHLEN];
@@ -899,8 +853,8 @@ compact_all_jit_code(void)
     o_files = alloca(sizeof(char *) * (active_units.length + 1));
     o_files[active_units.length] = NULL;
     CRITICAL_SECTION_START(3, "in compact_all_jit_code to keep .o files");
-    for (node = active_units.head; node != NULL; node = node->next) {
-        o_files[i] = node->unit->o_file;
+    list_for_each(&active_units.head, cur, unode) {
+        o_files[i] = cur->o_file;
         i++;
     }
@@ -924,27 +878,25 @@ compact_all_jit_code(void)
     unit->handle = handle;
 
     /* lazily dlclose handle (and .so file for win32) on `mjit_finish()`. */
-    node = calloc(1, sizeof(struct rb_mjit_unit_node)); /* To prevent GC, don't use ZALLOC */
-    node->unit = unit;
-    add_to_list(node, &compact_units);
+    add_to_list(unit, &compact_units);
 
     if (!mjit_opts.save_temps)
         remove_so_file(so_file, unit);
 
     CRITICAL_SECTION_START(3, "in compact_all_jit_code to read list");
-    for (node = active_units.head; node != NULL; node = node->next) {
+    list_for_each(&active_units.head, cur, unode) {
         void *func;
         char funcname[35]; /* TODO: reconsider `35` */
-        sprintf(funcname, "_mjit%d", node->unit->id);
+        sprintf(funcname, "_mjit%d", cur->id);
 
         if ((func = dlsym(handle, funcname)) == NULL) {
             mjit_warning("skipping to reload '%s' from '%s': %s", funcname, so_file, dlerror());
             continue;
         }
 
-        if (node->unit->iseq) { /* Check whether GCed or not */
+        if (cur->iseq) { /* Check whether GCed or not */
             /* Usage of jit_code might be not in a critical section. */
-            MJIT_ATOMIC_SET(node->unit->iseq->body->jit_func, (mjit_func_t)func);
+            MJIT_ATOMIC_SET(cur->iseq->body->jit_func, (mjit_func_t)func);
         }
     }
     CRITICAL_SECTION_FINISH(3, "in compact_all_jit_code to read list");
@@ -1089,6 +1041,13 @@ convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries
     in_jit = TRUE;
     CRITICAL_SECTION_FINISH(3, "before mjit_compile to wait GC finish");
 
+    /* We need to check again here because we could've waited on GC above */
+    if (unit->iseq == NULL) {
+        if (!mjit_opts.save_temps)
+            remove_file(c_file);
+        free_unit(unit);
+        return (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC;
+    }
     {
         VALUE s = rb_iseq_path(unit->iseq);
         const char *label = RSTRING_PTR(unit->iseq->body->location.label);
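The block added above re-checks unit->iseq after the thread may have slept waiting for GC to finish: the ISeq could have been collected during the wait. The general pattern, sketched with plain pthreads and hypothetical names standing in for MJIT's critical-section machinery:

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical globals standing in for MJIT's GC flags and engine mutex. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  gc_finished = PTHREAD_COND_INITIALIZER;
    static bool gc_running;

    /* Wait for GC, then report whether the object survived. Anything checked
     * before a blocking wait must be re-checked after the lock is reacquired. */
    static bool wait_for_gc_and_revalidate(void *const *objp)
    {
        bool alive;

        pthread_mutex_lock(&lock);
        while (gc_running)
            pthread_cond_wait(&gc_finished, &lock);  /* may block; state can change */
        alive = (*objp != NULL);                     /* the re-check */
        pthread_mutex_unlock(&lock);
        return alive;
    }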
@@ -1146,14 +1105,8 @@ convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries
         remove_so_file(so_file, unit);
 
     if ((uintptr_t)func > (uintptr_t)LAST_JIT_ISEQ_FUNC) {
-        struct rb_mjit_unit_node *node = create_list_node(unit);
-        if (node == NULL) {
-            mjit_warning("failed to allocate a node to be added to active_units");
-            return (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC;
-        }
         CRITICAL_SECTION_START(3, "end of jit");
-        add_to_list(node, &active_units);
+        add_to_list(unit, &active_units);
         if (unit->iseq)
             print_jit_result("success", unit, end_time - start_time, c_file);
         CRITICAL_SECTION_FINISH(3, "end of jit");
@@ -1216,22 +1169,22 @@ mjit_worker(void)
     /* main worker loop */
     while (!stop_worker_p) {
-        struct rb_mjit_unit_node *node;
+        struct rb_mjit_unit *unit;
 
         /* wait until unit is available */
         CRITICAL_SECTION_START(3, "in worker dequeue");
-        while ((unit_queue.head == NULL || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
+        while ((list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
             rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
             verbose(3, "Getting wakeup from client");
         }
-        node = get_from_list(&unit_queue);
+        unit = get_from_list(&unit_queue);
         CRITICAL_SECTION_FINISH(3, "in worker dequeue");
 
-        if (node) {
+        if (unit) {
             mjit_func_t func;
             struct mjit_copy_job job;
 
-            job.body = node->unit->iseq->body;
+            job.body = unit->iseq->body;
             job.cc_entries = NULL;
             if (job.body->ci_size > 0 || job.body->ci_kw_size > 0)
                 job.cc_entries = alloca(sizeof(struct rb_call_cache) * (job.body->ci_size + job.body->ci_kw_size));
@@ -1250,14 +1203,13 @@ mjit_worker(void)
             }
 
             /* JIT compile */
-            func = convert_unit_to_func(node->unit, job.cc_entries, job.is_entries);
+            func = convert_unit_to_func(unit, job.cc_entries, job.is_entries);
 
             CRITICAL_SECTION_START(3, "in jit func replace");
-            if (node->unit->iseq) { /* Check whether GCed or not */
+            if (unit->iseq) { /* Check whether GCed or not */
                 /* Usage of jit_code might be not in a critical section. */
-                MJIT_ATOMIC_SET(node->unit->iseq->body->jit_func, func);
+                MJIT_ATOMIC_SET(unit->iseq->body->jit_func, func);
             }
-            remove_from_list(node, &unit_queue);
             CRITICAL_SECTION_FINISH(3, "in jit func replace");
 
 #ifndef _MSC_VER