Mirror of https://github.com/github/ruby.git
28a1c4f33e seems to call an improper ensure clause. [Bug #20655]
Rather than fixing it properly, I bet it would be much better to simply
revert that commit. It reduces unneeded complexity. Jumping into a block
called by a C function like Hash#each with callcc is the user's fault;
it does not need serious support.
Parent: c884db0b5b
Commit: ac5ac48a36
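To make the unsupported pattern concrete, here is a minimal sketch (mine, not the reproducer from the ticket) that captures a continuation inside a block run by the C function Hash#each and later jumps back into the finished iteration. Globals are used because a continuation call can roll back stack-resident locals; what happens after the jump varies by Ruby version, which is part of the point.

    require "continuation"   # Kernel#callcc lives in this bundled library

    $cont = nil
    $jumped = false

    h = { a: 1, b: 2 }
    h.each do |k, v|
      # Capture a continuation while Hash#each's C frame is live.
      callcc { |c| $cont ||= c }
    end

    unless $jumped
      $jumped = true
      # Jump back into the middle of the iteration from outside it.
      # Hash#each keeps its internal iteration level consistent via a
      # C-level ensure handler; this kind of re-entry is exactly what
      # the commit declines to support.
      $cont.call
    end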
cont.c (94 lines changed)

@@ -226,7 +226,6 @@ typedef struct rb_context_struct {
     } machine;
     rb_execution_context_t saved_ec;
     rb_jmpbuf_t jmpbuf;
-    rb_ensure_entry_t *ensure_array;
     struct rb_jit_cont *jit_cont; // Continuation contexts for JITs
 } rb_context_t;
 
@@ -1053,7 +1052,6 @@ cont_free(void *ptr)
 
     if (cont->type == CONTINUATION_CONTEXT) {
        ruby_xfree(cont->saved_ec.vm_stack);
-        ruby_xfree(cont->ensure_array);
        RUBY_FREE_UNLESS_NULL(cont->machine.stack);
     }
     else {
@@ -1458,22 +1456,6 @@ cont_capture(volatile int *volatile stat)
     VM_ASSERT(cont->saved_ec.cfp != NULL);
     cont_save_machine_stack(th, cont);
 
-    /* backup ensure_list to array for search in another context */
-    {
-        rb_ensure_list_t *p;
-        int size = 0;
-        rb_ensure_entry_t *entry;
-        for (p=th->ec->ensure_list; p; p=p->next)
-            size++;
-        entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
-        for (p=th->ec->ensure_list; p; p=p->next) {
-            if (!p->entry.marker)
-                p->entry.marker = rb_ary_hidden_new(0); /* dummy object */
-            *entry++ = p->entry;
-        }
-        entry->marker = 0;
-    }
-
     if (ruby_setjmp(cont->jmpbuf)) {
        VALUE value;
 
@@ -1534,7 +1516,6 @@ cont_restore_thread(rb_context_t *cont)
        th->ec->tag = sec->tag;
        th->ec->root_lep = sec->root_lep;
        th->ec->root_svar = sec->root_svar;
-        th->ec->ensure_list = sec->ensure_list;
        th->ec->errinfo = sec->errinfo;
 
        VM_ASSERT(th->ec->vm_stack != NULL);
@@ -1797,80 +1778,6 @@ make_passing_arg(int argc, const VALUE *argv)
 
 typedef VALUE e_proc(VALUE);
 
-/* CAUTION!! : Currently, error in rollback_func is not supported */
-/* same as rb_protect if set rollback_func to NULL */
-void
-ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
-{
-    st_table **table_p = &GET_VM()->ensure_rollback_table;
-    if (UNLIKELY(*table_p == NULL)) {
-        *table_p = st_init_numtable();
-    }
-    st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
-}
-
-static inline e_proc *
-lookup_rollback_func(e_proc *ensure_func)
-{
-    st_table *table = GET_VM()->ensure_rollback_table;
-    st_data_t val;
-    if (table && st_lookup(table, (st_data_t)ensure_func, &val))
-        return (e_proc *) val;
-    return (e_proc *) Qundef;
-}
-
-
-static inline void
-rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
-{
-    rb_ensure_list_t *p;
-    rb_ensure_entry_t *entry;
-    size_t i, j;
-    size_t cur_size;
-    size_t target_size;
-    size_t base_point;
-    e_proc *func;
-
-    cur_size = 0;
-    for (p=current; p; p=p->next)
-        cur_size++;
-    target_size = 0;
-    for (entry=target; entry->marker; entry++)
-        target_size++;
-
-    /* search common stack point */
-    p = current;
-    base_point = cur_size;
-    while (base_point) {
-        if (target_size >= base_point &&
-            p->entry.marker == target[target_size - base_point].marker)
-            break;
-        base_point --;
-        p = p->next;
-    }
-
-    /* rollback function check */
-    for (i=0; i < target_size - base_point; i++) {
-        if (!lookup_rollback_func(target[i].e_proc)) {
-            rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
-        }
-    }
-    /* pop ensure stack */
-    while (cur_size > base_point) {
-        /* escape from ensure block */
-        (*current->entry.e_proc)(current->entry.data2);
-        current = current->next;
-        cur_size--;
-    }
-    /* push ensure stack */
-    for (j = 0; j < i; j++) {
-        func = lookup_rollback_func(target[i - j - 1].e_proc);
-        if (!UNDEF_P((VALUE)func)) {
-            (*func)(target[i - j - 1].data2);
-        }
-    }
-}
-
 NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));
 
 /*
@@ -1902,7 +1809,6 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
     }
-    rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);
 
     cont->argc = argc;
     cont->value = make_passing_arg(argc, argv);
eval.c (9 lines changed)

@@ -1048,12 +1048,6 @@ rb_ensure(VALUE (*b_proc)(VALUE), VALUE data1, VALUE (*e_proc)(VALUE), VALUE data2)
     volatile VALUE result = Qnil;
     VALUE errinfo;
     rb_execution_context_t * volatile ec = GET_EC();
-    rb_ensure_list_t ensure_list;
-    ensure_list.entry.marker = 0;
-    ensure_list.entry.e_proc = e_proc;
-    ensure_list.entry.data2 = data2;
-    ensure_list.next = ec->ensure_list;
-    ec->ensure_list = &ensure_list;
     EC_PUSH_TAG(ec);
     if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        result = (*b_proc) (data1);
@@ -1063,8 +1057,7 @@ rb_ensure(VALUE (*b_proc)(VALUE), VALUE data1, VALUE (*e_proc)(VALUE), VALUE data2)
     if (!NIL_P(errinfo) && !RB_TYPE_P(errinfo, T_OBJECT)) {
        ec->errinfo = Qnil;
     }
-    ec->ensure_list=ensure_list.next;
-    (*ensure_list.entry.e_proc)(ensure_list.entry.data2);
+    (*e_proc)(data2);
     ec->errinfo = errinfo;
     if (state)
        EC_JUMP_TAG(ec, state);
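After this hunk, rb_ensure simply pushes a tag, runs b_proc, and invokes e_proc directly, with no ensure_list bookkeeping on the execution context. rb_ensure gives C callers the same guarantee that begin/ensure gives Ruby code; a minimal Ruby sketch of that guarantee (acquire and release are hypothetical placeholders, not CRuby API):

    def with_resource
      res = acquire       # hypothetical: obtain some resource
      begin
        yield res
      ensure
        release(res)      # runs on normal exit and on raise/throw alike
      end
    end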
hash.c (10 lines changed)

@@ -1395,13 +1395,6 @@ hash_iter_lev_dec(VALUE hash)
     iter_lev_in_flags_set(hash, lev - 1);
 }
 
-static VALUE
-hash_foreach_ensure_rollback(VALUE hash)
-{
-    hash_iter_lev_inc(hash);
-    return 0;
-}
-
 static VALUE
 hash_foreach_ensure(VALUE hash)
 {
@@ -7445,9 +7438,6 @@ Init_Hash(void)
      */
     rb_define_global_const("ENV", envtbl);
 
-    /* for callcc */
-    ruby_register_rollback_func_for_ensure(hash_foreach_ensure, hash_foreach_ensure_rollback);
-
     HASH_ASSERT(sizeof(ar_hint_t) * RHASH_AR_TABLE_MAX_SIZE == sizeof(VALUE));
 }
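The two deleted hash.c functions were the rollback pair registered for Hash#each's ensure handler: hash_foreach_ensure decrements the hash's iteration level when iteration exits, and hash_foreach_ensure_rollback re-incremented it when a continuation jumped back in. That iteration level is what lets Hash detect mutation during iteration, as in this sketch (the RuntimeError is current CRuby behavior; the exact message may vary by version):

    h = { a: 1 }
    h.each do |k, v|
      # Adding a key while the iteration level is nonzero raises a
      # RuntimeError: can't add a new key into hash during iteration
      h[:b] = 2
    end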
rjit_c.rb (1 line changed)

@@ -1057,7 +1057,6 @@ module RubyVM::RJIT # :nodoc: all
      storage: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), storage)")],
      root_lep: [CType::Pointer.new { self.VALUE }, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), root_lep)")],
      root_svar: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), root_svar)")],
-      ensure_list: [CType::Pointer.new { self.rb_ensure_list_t }, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), ensure_list)")],
      trace_arg: [CType::Pointer.new { self.rb_trace_arg_struct }, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), trace_arg)")],
      errinfo: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), errinfo)")],
      passed_block_handler: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_execution_context_struct *)NULL)), passed_block_handler)")],
vm.c (2 lines changed)

@@ -3080,7 +3080,6 @@ ruby_vm_destruct(rb_vm_t *vm)
        xfree(GET_SHAPE_TREE());
 
        st_free_table(vm->static_ext_inits);
-        st_free_table(vm->ensure_rollback_table);
 
        rb_vm_postponed_job_free();
 
@@ -3208,7 +3207,6 @@ vm_memsize(const void *ptr)
        rb_vm_memsize_waiting_fds(&vm->waiting_fds) +
        rb_st_memsize(vm->loaded_features_index) +
        rb_st_memsize(vm->loading_table) +
-        rb_st_memsize(vm->ensure_rollback_table) +
        rb_vm_memsize_postponed_job_queue() +
        rb_vm_memsize_workqueue(&vm->workqueue) +
        vm_memsize_at_exit_list(vm->at_exit) +
vm_core.h (17 lines changed)

@@ -734,9 +734,6 @@ typedef struct rb_vm_struct {
        VALUE cmd[RUBY_NSIG];
     } trap_list;
 
-    /* relation table of ensure - rollback for callcc */
-    struct st_table *ensure_rollback_table;
-
     /* postponed_job (async-signal-safe, and thread-safe) */
     struct rb_postponed_job_queue *postponed_job_queue;
 
@@ -974,17 +971,6 @@ struct rb_unblock_callback {
 
 struct rb_mutex_struct;
 
-typedef struct rb_ensure_entry {
-    VALUE marker;
-    VALUE (*e_proc)(VALUE);
-    VALUE data2;
-} rb_ensure_entry_t;
-
-typedef struct rb_ensure_list {
-    struct rb_ensure_list *next;
-    struct rb_ensure_entry entry;
-} rb_ensure_list_t;
-
 typedef struct rb_fiber_struct rb_fiber_t;
 
 struct rb_waiting_list {
@@ -1023,9 +1009,6 @@ struct rb_execution_context_struct {
     const VALUE *root_lep;
     VALUE root_svar;
 
-    /* ensure & callcc */
-    rb_ensure_list_t *ensure_list;
-
     /* trace information */
     struct rb_trace_arg_struct *trace_arg;
 