2020-04-10 08:11:40 +03:00
|
|
|
#ifndef RUBY_VM_CORE_H
|
|
|
|
#define RUBY_VM_CORE_H
|
2006-12-31 18:02:22 +03:00
|
|
|
/**********************************************************************
|
|
|
|
|
2008-12-09 10:17:10 +03:00
|
|
|
vm_core.h -
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
$Author$
|
|
|
|
created at: 04/01/01 19:41:38 JST
|
|
|
|
|
* blockinlining.c: remove "yarv" prefix.
* array.c, numeric.c: ditto.
* insnhelper.ci, insns.def, vm_evalbody.ci: ditto.
* yarvcore.c: removed.
* yarvcore.h: renamed to core.h.
* cont.c, debug.c, error.c, process.c, signal.c : ditto.
* ext/probeprofiler/probeprofiler.c: ditto.
* id.c, id.h: added.
* inits.c: ditto.
* compile.c: rename internal functions.
* compile.h: fix debug flag.
* eval.c, object.c, vm.c: remove ruby_top_self.
use rb_vm_top_self() instead.
* eval_intern.h, eval_load: ditto.
* gc.c: rename yarv_machine_stack_mark() to
rb_gc_mark_machine_stack().
* insnhelper.h: remove unused macros.
* iseq.c: add iseq_compile() to create iseq object
from source string.
* proc.c: rename a internal function.
* template/insns.inc.tmpl: remove YARV prefix.
* thread.c:
* vm.c (rb_iseq_eval): added.
* vm.c: move some functions from yarvcore.c.
* vm_dump.c: fix to remove compiler warning.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12741 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-07-12 08:25:46 +04:00
|
|
|
Copyright (C) 2004-2007 Koichi Sasada
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
**********************************************************************/
|
|
|
|
|
2015-06-11 02:25:30 +03:00
|
|
|
/*
|
|
|
|
* Enable check mode.
|
|
|
|
* 1: enable local assertions.
|
|
|
|
*/
|
|
|
|
#ifndef VM_CHECK_MODE
|
2019-07-15 05:27:38 +03:00
|
|
|
|
|
|
|
// respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
|
|
|
|
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
|
|
|
|
|
|
|
|
#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
|
2015-06-11 02:25:30 +03:00
|
|
|
#endif
|
|
|
|
|
2015-10-23 19:57:58 +03:00
|
|
|
/**
|
|
|
|
* VM Debug Level
|
|
|
|
*
|
|
|
|
* debug level:
|
|
|
|
* 0: no debug output
|
|
|
|
* 1: show instruction name
|
|
|
|
* 2: show stack frame when control stack frame is changed
|
|
|
|
* 3: show stack status
|
|
|
|
* 4: show register
|
|
|
|
* 5:
|
|
|
|
* 10: gc check
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef VMDEBUG
|
|
|
|
#define VMDEBUG 0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
#undef VMDEBUG
|
|
|
|
#define VMDEBUG 3
|
|
|
|
#endif
|
|
|
|
|
2020-05-08 12:31:09 +03:00
|
|
|
#include "ruby/internal/config.h"
|
2019-12-04 11:16:30 +03:00
|
|
|
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <signal.h>
|
|
|
|
#include <stdarg.h>
|
|
|
|
|
2016-01-25 11:34:00 +03:00
|
|
|
#include "ruby_assert.h"
|
|
|
|
|
2023-02-17 17:14:53 +03:00
|
|
|
#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
|
|
|
|
|
2015-06-11 02:25:30 +03:00
|
|
|
#if VM_CHECK_MODE > 0
|
2024-01-31 09:11:59 +03:00
|
|
|
#define VM_ASSERT(/*expr, */...) RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, __VA_ARGS__)
|
2016-07-28 14:02:30 +03:00
|
|
|
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
|
2022-05-16 15:50:02 +03:00
|
|
|
#define RUBY_ASSERT_CRITICAL_SECTION
|
|
|
|
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
|
2015-06-11 02:25:30 +03:00
|
|
|
#else
|
2024-01-31 09:11:59 +03:00
|
|
|
#define VM_ASSERT(/*expr, */...) ((void)0)
|
2017-08-10 14:40:49 +03:00
|
|
|
#define VM_UNREACHABLE(func) UNREACHABLE
|
2022-05-16 15:50:02 +03:00
|
|
|
#define RUBY_DEBUG_THREAD_SCHEDULE()
|
2015-06-11 02:25:30 +03:00
|
|
|
#endif
|
|
|
|
|
2022-05-16 15:50:02 +03:00
|
|
|
#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
|
|
|
|
|
|
|
|
#if defined(RUBY_ASSERT_CRITICAL_SECTION)
|
|
|
|
// TODO add documentation
|
|
|
|
extern int ruby_assert_critical_section_entered;
|
|
|
|
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
|
|
|
|
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
|
2022-05-15 07:07:12 +03:00
|
|
|
#else
|
2022-05-16 15:50:02 +03:00
|
|
|
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
|
|
|
|
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
|
2022-05-15 07:07:12 +03:00
|
|
|
#endif
|
|
|
|
|
2022-01-15 16:18:55 +03:00
|
|
|
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
|
|
|
|
# include "wasm/setjmp.h"
|
|
|
|
#else
|
|
|
|
# include <setjmp.h>
|
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2022-04-22 15:19:03 +03:00
|
|
|
#if defined(__linux__) || defined(__FreeBSD__)
|
|
|
|
# define RB_THREAD_T_HAS_NATIVE_ID
|
|
|
|
#endif
|
|
|
|
|
2020-05-08 12:31:09 +03:00
|
|
|
#include "ruby/internal/stdbool.h"
|
2020-04-08 07:28:13 +03:00
|
|
|
#include "ccan/list/list.h"
|
* blockinlining.c: remove "yarv" prefix.
* array.c, numeric.c: ditto.
* insnhelper.ci, insns.def, vm_evalbody.ci: ditto.
* yarvcore.c: removed.
* yarvcore.h: renamed to core.h.
* cont.c, debug.c, error.c, process.c, signal.c : ditto.
* ext/probeprofiler/probeprofiler.c: ditto.
* id.c, id.h: added.
* inits.c: ditto.
* compile.c: rename internal functions.
* compile.h: fix debug flag.
* eval.c, object.c, vm.c: remove ruby_top_self.
use rb_vm_top_self() instead.
* eval_intern.h, eval_load: ditto.
* gc.c: rename yarv_machine_stack_mark() to
rb_gc_mark_machine_stack().
* insnhelper.h: remove unused macros.
* iseq.c: add iseq_compile() to create iseq object
from source string.
* proc.c: rename a internal function.
* template/insns.inc.tmpl: remove YARV prefix.
* thread.c:
* vm.c (rb_iseq_eval): added.
* vm.c: move some functions from yarvcore.c.
* vm_dump.c: fix to remove compiler warning.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12741 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-07-12 08:25:46 +04:00
|
|
|
#include "id.h"
|
2019-12-04 11:16:30 +03:00
|
|
|
#include "internal.h"
|
|
|
|
#include "internal/array.h"
|
2022-12-01 04:28:14 +03:00
|
|
|
#include "internal/basic_operators.h"
|
2024-01-17 03:45:33 +03:00
|
|
|
#include "internal/sanitizers.h"
|
2019-12-04 11:16:30 +03:00
|
|
|
#include "internal/serial.h"
|
|
|
|
#include "internal/vm.h"
|
2009-07-15 18:59:41 +04:00
|
|
|
#include "method.h"
|
2019-12-04 11:16:30 +03:00
|
|
|
#include "node.h"
|
|
|
|
#include "ruby/ruby.h"
|
|
|
|
#include "ruby/st.h"
|
2012-11-09 20:05:07 +04:00
|
|
|
#include "ruby_atomic.h"
|
2019-12-04 11:16:30 +03:00
|
|
|
#include "vm_opts.h"
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2014-05-14 14:55:38 +04:00
|
|
|
#include "ruby/thread_native.h"
|
2019-12-04 11:16:30 +03:00
|
|
|
/*
|
|
|
|
* implementation selector of get_insn_info algorithm
|
|
|
|
* 0: linear search
|
|
|
|
* 1: binary search
|
|
|
|
* 2: succinct bitvector
|
|
|
|
*/
|
|
|
|
#ifndef VM_INSN_INFO_TABLE_IMPL
|
|
|
|
# define VM_INSN_INFO_TABLE_IMPL 2
|
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2018-10-19 06:33:48 +03:00
|
|
|
#if defined(NSIG_MAX) /* POSIX issue 8 */
|
|
|
|
# undef NSIG
|
|
|
|
# define NSIG NSIG_MAX
|
|
|
|
#elif defined(_SIG_MAXSIG) /* FreeBSD */
|
|
|
|
# undef NSIG
|
|
|
|
# define NSIG _SIG_MAXSIG
|
|
|
|
#elif defined(_SIGMAX) /* QNX */
|
|
|
|
# define NSIG (_SIGMAX + 1)
|
|
|
|
#elif defined(NSIG) /* 99% of everything else */
|
|
|
|
# /* take it */
|
|
|
|
#else /* Last resort */
|
|
|
|
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
|
2006-12-31 18:02:22 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define RUBY_NSIG NSIG
|
|
|
|
|
2018-07-05 06:02:33 +03:00
|
|
|
#if defined(SIGCLD)
|
2023-05-15 17:14:51 +03:00
|
|
|
# define RUBY_SIGCHLD (SIGCLD)
|
2018-07-05 06:02:33 +03:00
|
|
|
#elif defined(SIGCHLD)
|
2023-05-15 17:14:51 +03:00
|
|
|
# define RUBY_SIGCHLD (SIGCHLD)
|
2018-07-05 06:02:33 +03:00
|
|
|
#endif
|
|
|
|
|
2010-05-13 20:20:26 +04:00
|
|
|
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
|
2018-04-21 00:38:27 +03:00
|
|
|
# define USE_SIGALTSTACK
|
2020-03-06 15:32:42 +03:00
|
|
|
void *rb_allocate_sigaltstack(void);
|
|
|
|
void *rb_register_sigaltstack(void *);
|
|
|
|
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
|
2021-09-06 08:22:24 +03:00
|
|
|
# define RB_ALTSTACK_FREE(var) free(var)
|
2018-07-29 13:15:11 +03:00
|
|
|
# define RB_ALTSTACK(var) var
|
2018-04-21 00:38:27 +03:00
|
|
|
#else /* noop */
|
2020-03-06 15:32:42 +03:00
|
|
|
# define RB_ALTSTACK_INIT(var, altstack)
|
2018-04-21 00:38:27 +03:00
|
|
|
# define RB_ALTSTACK_FREE(var)
|
2018-07-29 13:15:11 +03:00
|
|
|
# define RB_ALTSTACK(var) (0)
|
2010-05-13 20:20:26 +04:00
|
|
|
#endif
|
|
|
|
|
2022-05-24 10:39:45 +03:00
|
|
|
#include THREAD_IMPL_H
|
|
|
|
#define RUBY_VM_THREAD_MODEL 2
|
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
/*****************/
|
|
|
|
/* configuration */
|
|
|
|
/*****************/
|
|
|
|
|
|
|
|
/* gcc ver. check */
|
|
|
|
#if defined(__GNUC__) && __GNUC__ >= 2
|
|
|
|
|
|
|
|
#if OPT_TOKEN_THREADED_CODE
|
|
|
|
#if OPT_DIRECT_THREADED_CODE
|
|
|
|
#undef OPT_DIRECT_THREADED_CODE
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#else /* defined(__GNUC__) && __GNUC__ >= 2 */
|
|
|
|
|
|
|
|
/* disable threaded code options */
|
|
|
|
#if OPT_DIRECT_THREADED_CODE
|
|
|
|
#undef OPT_DIRECT_THREADED_CODE
|
|
|
|
#endif
|
|
|
|
#if OPT_TOKEN_THREADED_CODE
|
|
|
|
#undef OPT_TOKEN_THREADED_CODE
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* call threaded code */
|
|
|
|
#if OPT_CALL_THREADED_CODE
|
|
|
|
#if OPT_DIRECT_THREADED_CODE
|
|
|
|
#undef OPT_DIRECT_THREADED_CODE
|
|
|
|
#endif /* OPT_DIRECT_THREADED_CODE */
|
|
|
|
#endif /* OPT_CALL_THREADED_CODE */
|
|
|
|
|
2018-08-23 11:32:30 +03:00
|
|
|
void rb_vm_encoded_insn_data_table_init(void);
|
2008-05-23 07:23:08 +04:00
|
|
|
typedef unsigned long rb_num_t;
|
2018-01-09 16:30:27 +03:00
|
|
|
typedef signed long rb_snum_t;
|
2008-05-23 07:23:08 +04:00
|
|
|
|
2015-07-17 16:18:12 +03:00
|
|
|
/* Tag values for setjmp/longjmp-based non-local control flow.
 * Each non-RAISE tag corresponds to the Ruby control-flow keyword of the
 * same name; RUBY_TAG_MASK extracts the tag from a combined state word. */
enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0, /* no jump in progress */
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6, /* exception raised */
    RUBY_TAG_THROW  = 0x7, /* Kernel#throw */
    RUBY_TAG_FATAL  = 0x8, /* unrescuable fatal error */
    RUBY_TAG_MASK   = 0xf  /* mask covering all tag values above */
};
|
2017-06-23 10:25:52 +03:00
|
|
|
|
2017-07-12 18:02:09 +03:00
|
|
|
#define TAG_NONE RUBY_TAG_NONE
|
2015-07-17 16:18:12 +03:00
|
|
|
#define TAG_RETURN RUBY_TAG_RETURN
|
|
|
|
#define TAG_BREAK RUBY_TAG_BREAK
|
|
|
|
#define TAG_NEXT RUBY_TAG_NEXT
|
|
|
|
#define TAG_RETRY RUBY_TAG_RETRY
|
|
|
|
#define TAG_REDO RUBY_TAG_REDO
|
|
|
|
#define TAG_RAISE RUBY_TAG_RAISE
|
|
|
|
#define TAG_THROW RUBY_TAG_THROW
|
|
|
|
#define TAG_FATAL RUBY_TAG_FATAL
|
|
|
|
#define TAG_MASK RUBY_TAG_MASK
|
|
|
|
|
2015-07-20 03:08:23 +03:00
|
|
|
/* Flags combined with a throw state value.
 * The low bits (VM_THROW_STATE_MASK) carry the tag/state number; the flag
 * bit sits above that range so both fit in one word. */
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000, /* the throw does not escape the current frame */
    VM_THROW_STATE_MASK = 0xff        /* mask extracting the state number */
};
|
|
|
|
|
2015-10-29 08:45:18 +03:00
|
|
|
/* forward declarations */
|
|
|
|
struct rb_thread_struct;
|
|
|
|
struct rb_control_frame_struct;
|
|
|
|
|
2009-07-15 18:59:41 +04:00
|
|
|
/* iseq data type */
|
2010-10-31 04:42:54 +03:00
|
|
|
typedef struct rb_compile_option_struct rb_compile_option_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2021-10-28 19:58:15 +03:00
|
|
|
/* Overlays an rb_serial_t with two VALUE-sized words so a serial number
 * can occupy two VALUE slots (e.g. as inline-cache operands).
 * NOTE(review): assumes sizeof(rb_serial_t) <= 2 * sizeof(VALUE) — confirm
 * against the rb_serial_t definition. */
union ic_serial_entry {
    rb_serial_t raw;     /* the serial number itself */
    VALUE data[2];       /* the same storage viewed as two VALUE words */
};
|
|
|
|
|
2021-01-04 12:08:25 +03:00
|
|
|
// imemo_constcache: one cached constant lookup result.
// Laid out to mirror an RVALUE slot (flags + up to 4 value words, v0..v3);
// the STATIC_ASSERT below guarantees the struct still fits in RVALUE_SIZE.
struct iseq_inline_constant_cache_entry {
    VALUE flags;                 /* imemo header flags */

    VALUE value;                 // v0: the cached constant's value
    VALUE _unused1;              // v1
    VALUE _unused2;              // v2
    const rb_cref_t *ic_cref;    // v3: cref (lexical scope) the cache is valid for
};
/* The entry is allocated as an imemo object, so it must not outgrow a slot. */
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
|
2021-04-30 15:35:15 +03:00
|
|
|
|
2021-01-04 12:08:25 +03:00
|
|
|
/* Inline cache for a constant path (used by opt_getconstant_path).
 * Holds the cached lookup result plus the constant path being referenced,
 * so the cache can be registered for granular invalidation. */
struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry; /* cached result; NULL until populated */

    /**
     * A null-terminated list of ids, used to represent a constant's path
     * idNULL is used to represent the :: prefix, and 0 is used to denote the end
     * of the list.
     *
     * For example
     * FOO        {rb_intern("FOO"), 0}
     * FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
     * ::FOO      {idNULL, rb_intern("FOO"), 0}
     * ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
     */
    const ID *segments;
};
|
|
|
|
|
|
|
|
/* Inline cache for instance-variable access. */
struct iseq_inline_iv_cache_entry {
    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
    ID iv_set_name;  /* name of the ivar being written (set caches) */
};
|
|
|
|
|
2021-06-01 20:34:06 +03:00
|
|
|
/* Inline cache for class-variable access; points at the class's
 * cvar table entry so lookups can be skipped while the entry is valid. */
struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};
|
|
|
|
|
2013-08-20 21:41:13 +04:00
|
|
|
/* Per-instruction inline storage; which member is live depends on the
 * instruction that owns the slot (TS_ISE / TS_IC / TS_IVC operands). */
union iseq_inline_storage_entry {
    struct {
        /* NOTE(review): appears to serve the `once` instruction — the
         * running thread marks in-progress evaluation; confirm in insns.def. */
        struct rb_thread_struct *running_thread;
        VALUE value; /* memoized result */
    } once;
    struct iseq_inline_constant_cache ic_cache; /* constant path cache */
    struct iseq_inline_iv_cache_entry iv_cache; /* instance variable cache */
};
|
|
|
|
|
2015-09-19 20:59:58 +03:00
|
|
|
/* Per-call-site state assembled while setting up a method/block call.
 * Always stack-allocated by the VM (which also keeps heap_argv GC-visible). */
struct rb_calling_info {
    const struct rb_call_data *cd;   /* call data (call info + cache pair) */
    const struct rb_callcache *cc;   /* resolved call cache */
    VALUE block_handler;             /* block passed to the call, if any */
    VALUE recv;                      /* receiver */
    int argc;                        /* number of arguments on the VM stack */
    bool kw_splat;                   /* whether a keyword splat (**) was given */
    /* Temporary hidden array holding the arguments when a large array splat
     * would otherwise overflow the VM stack (see VM_ARGC_STACK_MAX); 0 when
     * arguments live on the VM stack. */
    VALUE heap_argv;
};
|
* insns.def (send, invokesuper, invokeblock, opt_*), vm_core.h:
use only a `ci' (rb_call_info_t) parameter instead of using
parameters such as `op_id', 'op_argc', `blockiseq' and flag.
These information are stored in rb_call_info_t at the compile
time.
This technique simplifies parameter passings at related
function calls (~10% speedups for simple mehtod invocation at
my machine).
`rb_call_info_t' also has new function pointer variable `call'.
This `call' variable enables to customize method (block)
invocation process for each place. However, it always call
`vm_call_general()' at this changes.
`rb_call_info_t' also has temporary variables for method
(block) invocation.
* vm_core.h, compile.c, insns.def: introduce VM_CALL_ARGS_SKIP_SETUP
VM_CALL macro. This flag indicates that this call can skip
caller_setup (block arg and splat arg).
* compile.c: catch up above changes.
* iseq.c: catch up above changes (especially for TS_CALLINFO).
* tool/instruction.rb: catch up above chagnes.
* vm_insnhelper.c, vm_insnhelper.h: ditto. Macros and functions
parameters are changed.
* vm_eval.c (vm_call0): ditto (it will be rewriten soon).
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@37180 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-10-14 20:59:05 +04:00
|
|
|
|
Generalize cfunc large array splat fix to fix many additional cases raising SystemStackError
Originally, when 2e7bceb34ea858649e1f975a934ce1894d1f06a6 fixed cfuncs to no
longer use the VM stack for large array splats, it was thought to have fully
fixed Bug #4040, since the issue was fixed for methods defined in Ruby (iseqs)
back in Ruby 2.2.
After additional research, I determined that same issue affects almost all
types of method calls, not just iseq and cfunc calls. There were two main
types of remaining issues, important cases (where large array splat should
work) and pedantic cases (where large array splat raised SystemStackError
instead of ArgumentError).
Important cases:
```ruby
define_method(:a){|*a|}
a(*1380888.times)
def b(*a); end
send(:b, *1380888.times)
:b.to_proc.call(self, *1380888.times)
def d; yield(*1380888.times) end
d(&method(:b))
def self.method_missing(*a); end
not_a_method(*1380888.times)
```
Pedantic cases:
```ruby
def a; end
a(*1380888.times)
def b(_); end
b(*1380888.times)
def c(_=nil); end
c(*1380888.times)
c = Class.new do
attr_accessor :a
alias b a=
end.new
c.a(*1380888.times)
c.b(*1380888.times)
c = Struct.new(:a) do
alias b a=
end.new
c.a(*1380888.times)
c.b(*1380888.times)
```
This patch fixes all usage of CALLER_SETUP_ARG with splatting a large
number of arguments, and required similar fixes to use a temporary
hidden array in three other cases where the VM would use the VM stack
for handling a large number of arguments. However, it is possible
there may be additional cases where splatting a large number
of arguments still causes a SystemStackError.
This has a measurable performance impact, as it requires additional
checks for a large number of arguments in many additional cases.
This change is fairly invasive, as there were many different VM
functions that needed to be modified to support this. To avoid
too much API change, I modified struct rb_calling_info to add a
heap_argv member for storing the array, so I would not have to
thread it through many functions. This struct is always stack
allocated, which helps ensure sure GC doesn't collect it early.
Because of how invasive the changes are, and how rarely large
arrays are actually splatted in Ruby code, the existing test/spec
suites are not great at testing for correct behavior. To try to
find and fix all issues, I tested this in CI with
VM_ARGC_STACK_MAX to -1, ensuring that a temporary array is used
for all array splat method calls. This was very helpful in
finding breaking cases, especially ones involving flagged keyword
hashes.
Fixes [Bug #4040]
Co-authored-by: Jimmy Miller <jimmy.miller@shopify.com>
2023-03-07 02:58:58 +03:00
|
|
|
#ifndef VM_ARGC_STACK_MAX
|
|
|
|
#define VM_ARGC_STACK_MAX 128
|
|
|
|
#endif
|
|
|
|
|
|
|
|
# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
|
|
|
|
|
2017-10-27 05:49:30 +03:00
|
|
|
struct rb_execution_context_struct;
|
2015-10-23 20:00:51 +03:00
|
|
|
|
2007-06-24 22:40:13 +04:00
|
|
|
#if 1
|
2016-02-22 10:15:57 +03:00
|
|
|
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
|
2007-06-24 22:40:13 +04:00
|
|
|
#else
|
2016-02-22 10:15:57 +03:00
|
|
|
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
|
2007-06-24 22:40:13 +04:00
|
|
|
#endif
|
2016-02-22 10:15:57 +03:00
|
|
|
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
|
2007-06-24 22:40:13 +04:00
|
|
|
|
2012-06-04 06:49:37 +04:00
|
|
|
/* Source-location metadata attached to an instruction sequence. */
typedef struct rb_iseq_location_struct {
    VALUE pathobj;      /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label;   /* String */
    VALUE label;        /* String */
    int first_lineno;   /* first source line covered by the iseq */
    int node_id;        /* AST node id of the iseq's root node */
    rb_code_location_t code_location; /* begin/end line+column range */
} rb_iseq_location_t;
|
2012-05-22 12:31:38 +04:00
|
|
|
|
2017-06-01 03:05:33 +03:00
|
|
|
#define PATHOBJ_PATH 0
|
|
|
|
#define PATHOBJ_REALPATH 1
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
pathobj_path(VALUE pathobj)
|
|
|
|
{
|
|
|
|
if (RB_TYPE_P(pathobj, T_STRING)) {
|
|
|
|
return pathobj;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
|
|
|
|
return RARRAY_AREF(pathobj, PATHOBJ_PATH);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
pathobj_realpath(VALUE pathobj)
|
|
|
|
{
|
|
|
|
if (RB_TYPE_P(pathobj, T_STRING)) {
|
|
|
|
return pathobj;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
|
|
|
|
return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-05 07:17:44 +03:00
|
|
|
/* Forward declarations */
|
2023-03-07 10:17:25 +03:00
|
|
|
struct rb_rjit_unit;
|
mjit.c: merge MJIT infrastructure
that allows to JIT-compile Ruby methods by generating C code and
using C compiler. See the first comment of mjit.c to know what this
file does.
mjit.c is authored by Vladimir Makarov <vmakarov@redhat.com>.
After he invented great method JIT infrastructure for MRI as MJIT,
Lars Kanis <lars@greiz-reinsdorf.de> sent the patch to support MinGW
in MJIT. In addition to merging it, I ported pthread to Windows native
threads. Now this MJIT infrastructure can be compiled on Visual Studio.
This commit simplifies mjit.c to decrease code at initial merge. For
example, this commit does not provide multiple JIT threads support.
We can resurrect them later if we really want them, but I wanted to minimize
diff to make it easier to review this patch.
`/tmp/_mjitXXX` file is renamed to `/tmp/_ruby_mjitXXX` because non-Ruby
developers may not know the name "mjit" and the file name should make
sure it's from Ruby and not from some harmful programs. TODO: it may be
better to store this to some temporary directory which Ruby is already using
by Tempfile, if it's not bad for performance.
mjit.h: New. It has `mjit_exec` interface similar to `vm_exec`, which is
for triggering MJIT. This drops interface for AOT compared to the original
MJIT.
Makefile.in: define macros to let MJIT know the path of MJIT header.
Probably we can refactor this to reduce the number of macros (TODO).
win32/Makefile.sub: ditto.
common.mk: compile mjit.o and mjit_compile.o. Unlike original MJIT, this
commit separates MJIT infrastructure and JIT compiler code as independent
object files. As initial patch is NOT going to have ultra-fast JIT compiler,
it's likely to replace JIT compiler, e.g. original MJIT's compiler or some
future JIT impelementations which are not public now.
inits.c: define MJIT module. This is added because `MJIT.enabled?` was
necessary for testing.
test/lib/zombie_hunter.rb: skip if `MJIT.enabled?`. Obviously this
wouldn't work with current code when JIT is enabled.
test/ruby/test_io.rb: skip this too. This would make no sense with MJIT.
ruby.c: define MJIT CLI options. As major difference from original MJIT,
"-j:l"/"--jit:llvm" are renamed to "--jit-cc" because I want to support
not only gcc/clang but also cl.exe (Visual Studio) in the future. But it
takes only "--jit-cc=gcc", "--jit-cc=clang" for now. And only long "--jit"
options are allowed since some Ruby committers preferred it at Ruby
developers Meeting on January, and some of options are renamed.
This file also triggers to initialize MJIT thread and variables.
eval.c: finalize MJIT worker thread and variables.
test/ruby/test_rubyoptions.rb: fix number of CLI options for --jit.
thread_pthread.c: change for pthread abstraction in MJIT. Prefix rb_ for
functions which are used by other files.
thread_win32.c: ditto, for Windows. Those pthread porting is one of major
works that YARV-MJIT created, which is my fork of MJIT, in Feature 14235.
thread.c: follow rb_ prefix changes
vm.c: trigger MJIT call on VM invocation. Also trigger `mjit_mark` to avoid
SEGV by race between JIT and GC of ISeq. The improvement was provided by
wanabe <s.wanabe@gmail.com>.
In JIT compiler I created and am going to add in my next commit, I found
that having `mjit_exec` after `vm_loop_start:` is harmful because the
JIT-ed function doesn't proceed other ISeqs on RESTORE_REGS of leave insn.
Executing non-FINISH frame is unexpected for my JIT compiler and
`exception_handler` triggers executions of such ISeqs. So `mjit_exec`
here should be executed only when it directly comes from `vm_exec` call.
`RubyVM::MJIT` module and `.enabled?` method is added so that we can skip
some tests which don't expect JIT threads or compiler file descriptors.
vm_insnhelper.h: trigger MJIT on method calls during VM execution.
vm_core.h: add fields required for mjit.c. `bp` must be `cfp[6]` because
rb_control_frame_struct is likely to be casted to another struct. The
last position is the safest place to add the new field.
vm_insnhelper.c: save initial value of cfp->ep as cfp->bp. This is an
optimization which are done in both MJIT and YARV-MJIT. So this change
is added in this commit. Calculating bp from ep is a little heavy work,
so bp is kind of cache for it.
iseq.c: notify ISeq GC to MJIT. We should know which iseq in MJIT queue
is GCed to avoid SEGV. TODO: unload some GCed units in some safe way.
gc.c: add hooks so that MJIT can wait GC, and vice versa. Simultaneous
JIT and GC executions may cause SEGV and so we should synchronize them.
cont.c: save continuation information in MJIT worker. As MJIT shouldn't
unload JIT-ed code which is being used, MJIT wants to know full list of
saved execution contexts for continuation and detect ISeqs in use.
mjit_compile.c: added empty JIT compiler so that you can reuse this commit
to build your own JIT compiler. This commit tries to compile ISeqs but
all of them are considered as not supported in this commit. So you can't
use JIT compiler in this commit yet while we added --jit option now.
Patch author: Vladimir Makarov <vmakarov@redhat.com>.
Contributors:
Takashi Kokubun <takashikkbn@gmail.com>.
wanabe <s.wanabe@gmail.com>.
Lars Kanis <lars@greiz-reinsdorf.de>.
Part of Feature 12589 and 14235.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62189 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 09:58:09 +03:00
|
|
|
|
2022-06-18 01:28:14 +03:00
|
|
|
typedef uintptr_t iseq_bits_t;
|
|
|
|
|
2022-07-18 22:38:12 +03:00
|
|
|
#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
|
2022-06-18 01:28:14 +03:00
|
|
|
|
New constant caching insn: opt_getconstant_path
Previously YARV bytecode implemented constant caching by having a pair
of instructions, opt_getinlinecache and opt_setinlinecache, wrapping a
series of getconstant calls (with putobject providing supporting
arguments).
This commit replaces that pattern with a new instruction,
opt_getconstant_path, handling both getting/setting the inline cache and
fetching the constant on a cache miss.
This is implemented by storing the full constant path as a
null-terminated array of IDs inside of the IC structure. idNULL is used
to signal an absolute constant reference.
$ ./miniruby --dump=insns -e '::Foo::Bar::Baz'
== disasm: #<ISeq:<main>@-e:1 (1,0)-(1,13)> (catch: FALSE)
0000 opt_getconstant_path <ic:0 ::Foo::Bar::Baz> ( 1)[Li]
0002 leave
The motivation for this is that we had increasingly found the need to
disassemble the instructions between the opt_getinlinecache and
opt_setinlinecache in order to determine the constant we are fetching,
or otherwise store metadata.
This disassembly was done:
* In opt_setinlinecache, to register the IC against the constant names
it is using for granular invalidation.
* In rb_iseq_free, to unregister the IC from the invalidation table.
* In YJIT to find the position of a opt_getinlinecache instruction to
invalidate it when the cache is populated
* In YJIT to register the constant names being used for invalidation.
With this change we no longe need disassemly for these (in fact
rb_iseq_each is now unused), as the list of constant names being
referenced is held in the IC. This should also make it possible to make
more optimizations in the future.
This may also reduce the size of iseqs, as previously each segment
required 32 bytes (on 64-bit platforms) for each constant segment. This
implementation only stores one ID per-segment.
There should be no significant performance change between this and the
previous implementation. Previously opt_getinlinecache was a "leaf"
instruction, but it included a jump (almost always to a separate cache
line). Now opt_getconstant_path is a non-leaf (it may
raise/autoload/call const_missing) but it does not jump. These seem to
even out.
2022-08-10 20:35:48 +03:00
|
|
|
/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
|
|
|
|
/* Fetch the constant-lookup inline cache (IC) at slot `idx` from
 * body->is_entries.  Per the is_entries layout [ TS_IVC | TS_ICVARC |
 * TS_ISE | TS_IC ], IC entries sit after the IVC, ICVARC and ISE
 * sections, so those three sizes are added to the index first.
 * NOTE: the previous definition ended with a stray semicolon, which made
 * the macro expand to two statements / break in expression context
 * (CERT PRE11-C); the semicolon is removed here. */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache)
|
|
|
|
|
2022-07-22 10:49:08 +03:00
|
|
|
/* instruction sequence type: which kind of language construct an iseq
 * was compiled from.  NOTE(review): member meanings follow the names
 * (top-level code, method body, block, class body, rescue/ensure
 * clause, eval string, main script, plain iseq); confirm against the
 * compiler when relying on a specific one. */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};
|
|
|
|
|
2023-03-12 00:32:58 +03:00
|
|
|
// Attributes specified by Primitive.attr!
// Bit flags; combined values are stored in rb_iseq_constant_body::builtin_attrs.
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq only contains single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
};
|
|
|
|
|
2023-03-16 20:41:12 +03:00
|
|
|
typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
|
|
|
|
|
2015-07-22 13:55:02 +03:00
|
|
|
struct rb_iseq_constant_body {
|
2022-07-22 10:57:25 +03:00
|
|
|
enum rb_iseq_type type;
|
2015-07-22 01:52:59 +03:00
|
|
|
|
2015-07-22 14:21:21 +03:00
|
|
|
unsigned int iseq_size;
|
2019-04-20 04:19:47 +03:00
|
|
|
VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
|
2012-10-09 09:33:54 +04:00
|
|
|
|
2007-04-25 07:50:00 +04:00
|
|
|
/**
|
2014-11-03 02:14:21 +03:00
|
|
|
* parameter information
|
2007-04-25 07:50:00 +04:00
|
|
|
*
|
2007-06-30 22:04:35 +04:00
|
|
|
* def m(a1, a2, ..., aM, # mandatory
|
2010-08-02 19:53:48 +04:00
|
|
|
* b1=(...), b2=(...), ..., bN=(...), # optional
|
2007-06-30 22:04:35 +04:00
|
|
|
* *c, # rest
|
|
|
|
* d1, d2, ..., dO, # post
|
2012-12-12 01:44:49 +04:00
|
|
|
* e1:(...), e2:(...), ..., eK:(...), # keyword
|
* rewrite method/block parameter fitting logic to optimize
keyword arguments/parameters and a splat argument.
[Feature #10440] (Details are described in this ticket)
Most of complex part is moved to vm_args.c.
Now, ISeq#to_a does not catch up new instruction format.
* vm_core.h: change iseq data structures.
* introduce rb_call_info_kw_arg_t to represent keyword arguments.
* add rb_call_info_t::kw_arg.
* rename rb_iseq_t::arg_post_len to rb_iseq_t::arg_post_num.
* rename rb_iseq_t::arg_keywords to arg_keyword_num.
* rename rb_iseq_t::arg_keyword to rb_iseq_t::arg_keyword_bits.
to represent keyword bitmap parameter index.
This bitmap parameter shows that which keyword parameters are given
or not given (0 for given).
It is referred to by the `checkkeyword' instruction described below.
* rename rb_iseq_t::arg_keyword_check to rb_iseq_t::arg_keyword_rest
to represent keyword rest parameter index.
* add rb_iseq_t::arg_keyword_default_values to represent default
keyword values.
* rename VM_CALL_ARGS_SKIP_SETUP to VM_CALL_ARGS_SIMPLE
to represent
(ci->flag & (SPLAT|BLOCKARG)) &&
ci->blockiseq == NULL &&
ci->kw_arg == NULL.
* vm_insnhelper.c, vm_args.c: rewrite with refactoring.
* rewrite splat argument code.
* rewrite keyword arguments/parameters code.
* merge method and block parameter fitting code into one code base.
* vm.c, vm_eval.c: catch up these changes.
* compile.c (new_callinfo): callinfo requires kw_arg parameter.
* compile.c (compile_array_): check the last argument Hash object or
not. If Hash object and all keys are Symbol literals, they are
compiled to keyword arguments.
* insns.def (checkkeyword): add new instruction.
This instruction check the availability of corresponding keyword.
For example, a method "def foo k1: 'v1'; end" is compiled to the
following instructions.
0000 checkkeyword 2, 0 # check k1 is given.
0003 branchif 9 # if given, jump to address #9
0005 putstring "v1"
0007 setlocal_OP__WC__0 3 # k1 = 'v1'
0009 trace 8
0011 putnil
0012 trace 16
0014 leave
* insns.def (opt_send_simple): removed and add new instruction
"opt_send_without_block".
* parse.y (new_args_tail_gen): reorder variables.
Before this patch, a method "def foo(k1: 1, kr1:, k2: 2, **krest, &b)"
has parameter variables "k1, kr1, k2, &b, internal_id, krest",
but this patch reorders to "kr1, k1, k2, internal_id, krest, &b".
(locate a block variable at last)
* parse.y (vtable_pop): added.
This function remove latest `n' variables from vtable.
* iseq.c: catch up iseq data changes.
* proc.c: ditto.
* class.c (keyword_error): export as rb_keyword_error().
* common.mk: depend vm_args.c for vm.o.
* hash.c (rb_hash_has_key): export.
* internal.h: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@48239 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2014-11-02 21:02:55 +03:00
|
|
|
* **f, # keyword_rest
|
2012-12-12 01:44:49 +04:00
|
|
|
* &g) # block
|
2007-04-25 07:50:00 +04:00
|
|
|
* =>
|
|
|
|
*
|
2014-11-03 02:14:21 +03:00
|
|
|
* lead_num = M
|
2014-11-03 07:43:07 +03:00
|
|
|
* opt_num = N
|
2014-11-03 02:14:21 +03:00
|
|
|
* rest_start = M+N
|
|
|
|
* post_start = M+N+(*1)
|
|
|
|
* post_num = O
|
|
|
|
* keyword_num = K
|
|
|
|
* block_start = M+N+(*1)+O+K
|
|
|
|
* keyword_bits = M+N+(*1)+O+K+(&1)
|
|
|
|
* size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
|
2007-04-25 07:50:00 +04:00
|
|
|
*/
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2014-11-03 02:14:21 +03:00
|
|
|
struct {
|
|
|
|
struct {
|
|
|
|
unsigned int has_lead : 1;
|
|
|
|
unsigned int has_opt : 1;
|
|
|
|
unsigned int has_rest : 1;
|
|
|
|
unsigned int has_post : 1;
|
|
|
|
unsigned int has_kw : 1;
|
|
|
|
unsigned int has_kwrest : 1;
|
|
|
|
unsigned int has_block : 1;
|
2022-07-21 19:23:58 +03:00
|
|
|
|
2014-11-03 02:14:21 +03:00
|
|
|
unsigned int ambiguous_param0 : 1; /* {|a|} */
|
2019-04-19 16:19:41 +03:00
|
|
|
unsigned int accepts_no_kwarg : 1;
|
Add Module#ruby2_keywords for passing keywords through regular argument splats
This approach uses a flag bit on the final hash object in the regular splat,
as opposed to a previous approach that used a VM frame flag. The hash flag
approach is less invasive, and handles some cases that the VM frame flag
approach does not, such as saving the argument splat array and splatting it
later:
ruby2_keywords def foo(*args)
@args = args
bar
end
def bar
baz(*@args)
end
def baz(*args, **kw)
[args, kw]
end
foo(a:1) #=> [[], {a: 1}]
foo({a: 1}, **{}) #=> [[{a: 1}], {}]
foo({a: 1}) #=> 2.7: [[], {a: 1}] # and warning
foo({a: 1}) #=> 3.0: [[{a: 1}], {}]
It doesn't handle some cases that the VM frame flag handles, such as when
the final hash object is replaced using Hash#merge, but those cases are
probably less common and are unlikely to properly support keyword
argument separation.
Use ruby2_keywords to handle argument delegation in the delegate library.
2019-09-21 19:03:36 +03:00
|
|
|
unsigned int ruby2_keywords: 1;
|
Introduce Allocationless Anonymous Splat Forwarding
Ruby makes it easy to delegate all arguments from one method to another:
```ruby
def f(*args, **kw)
g(*args, **kw)
end
```
Unfortunately, this indirection decreases performance. One reason it
decreases performance is that this allocates an array and a hash per
call to `f`, even if `args` and `kw` are not modified.
Due to Ruby's ability to modify almost anything at runtime, it's
difficult to avoid the array allocation in the general case. For
example, it's not safe to avoid the allocation in a case like this:
```ruby
def f(*args, **kw)
foo(bar)
g(*args, **kw)
end
```
Because `foo` may be `eval` and `bar` may be a string referencing `args`
or `kw`.
To fix this correctly, you need to perform something similar to escape
analysis on the variables. However, there is a case where you can
avoid the allocation without doing escape analysis, and that is when
the splat variables are anonymous:
```ruby
def f(*, **)
g(*, **)
end
```
When splat variables are anonymous, it is not possible to reference
them directly, it is only possible to use them as splats to other
methods. Since that is the case, if `f` is called with a regular
splat and a keyword splat, it can pass the arguments directly to
`g` without copying them, avoiding allocation. For example:
```ruby
def g(a, b:)
a + b
end
def f(*, **)
g(*, **)
end
a = [1]
kw = {b: 2}
f(*a, **kw)
```
I call this technique: Allocationless Anonymous Splat Forwarding.
This is implemented using a couple additional iseq param flags,
anon_rest and anon_kwrest. If anon_rest is set, and an array splat
is passed when calling the method when the array splat can be used
without modification, `setup_parameters_complex` does not duplicate
it. Similarly, if anon_kwrest is set, and a keyword splat is passed
when calling the method, `setup_parameters_complex` does not
duplicate it.
2023-12-01 01:58:42 +03:00
|
|
|
unsigned int anon_rest: 1;
|
|
|
|
unsigned int anon_kwrest: 1;
|
2024-03-27 01:29:38 +03:00
|
|
|
unsigned int use_block: 1;
|
Optimized forwarding callers and callees
This patch optimizes forwarding callers and callees. It only optimizes methods that only take `...` as their parameter, and then pass `...` to other calls.
Calls it optimizes look like this:
```ruby
def bar(a) = a
def foo(...) = bar(...) # optimized
foo(123)
```
```ruby
def bar(a) = a
def foo(...) = bar(1, 2, ...) # optimized
foo(123)
```
```ruby
def bar(*a) = a
def foo(...)
list = [1, 2]
bar(*list, ...) # optimized
end
foo(123)
```
All variants of the above but using `super` are also optimized, including a bare super like this:
```ruby
def foo(...)
super
end
```
This patch eliminates intermediate allocations made when calling methods that accept `...`.
We can observe allocation elimination like this:
```ruby
def m
x = GC.stat(:total_allocated_objects)
yield
GC.stat(:total_allocated_objects) - x
end
def bar(a) = a
def foo(...) = bar(...)
def test
m { foo(123) }
end
test
p test # allocates 1 object on master, but 0 objects with this patch
```
```ruby
def bar(a, b:) = a + b
def foo(...) = bar(...)
def test
m { foo(1, b: 2) }
end
test
p test # allocates 2 objects on master, but 0 objects with this patch
```
How does it work?
-----------------
This patch works by using a dynamic stack size when passing forwarded parameters to callees.
The caller's info object (known as the "CI") contains the stack size of the
parameters, so we pass the CI object itself as a parameter to the callee.
When forwarding parameters, the forwarding ISeq uses the caller's CI to determine how much stack to copy, then copies the caller's stack before calling the callee.
The CI at the forwarded call site is adjusted using information from the caller's CI.
I think this description is kind of confusing, so let's walk through an example with code.
```ruby
def delegatee(a, b) = a + b
def delegator(...)
delegatee(...) # CI2 (FORWARDING)
end
def caller
delegator(1, 2) # CI1 (argc: 2)
end
```
Before we call the delegator method, the stack looks like this:
```
Executing Line | Code | Stack
---------------+---------------------------------------+--------
1| def delegatee(a, b) = a + b | self
2| | 1
3| def delegator(...) | 2
4| # |
5| delegatee(...) # CI2 (FORWARDING) |
6| end |
7| |
8| def caller |
-> 9| delegator(1, 2) # CI1 (argc: 2) |
10| end |
```
The ISeq for `delegator` is tagged as "forwardable", so when `caller` calls in
to `delegator`, it writes `CI1` on to the stack as a local variable for the
`delegator` method. The `delegator` method has a special local called `...`
that holds the caller's CI object.
Here is the ISeq disasm for `delegator`:
```
== disasm: #<ISeq:delegator@-e:1 (1,0)-(1,39)>
local table (size: 1, argc: 0 [opts: 0, rest: -1, post: 0, block: -1, kw: -1@-1, kwrest: -1])
[ 1] "..."@0
0000 putself ( 1)[LiCa]
0001 getlocal_WC_0 "..."@0
0003 send <calldata!mid:delegatee, argc:0, FCALL|FORWARDING>, nil
0006 leave [Re]
```
The local called `...` will contain the caller's CI: CI1.
Here is the stack when we enter `delegator`:
```
Executing Line | Code | Stack
---------------+---------------------------------------+--------
1| def delegatee(a, b) = a + b | self
2| | 1
3| def delegator(...) | 2
-> 4| # | CI1 (argc: 2)
5| delegatee(...) # CI2 (FORWARDING) | cref_or_me
6| end | specval
7| | type
8| def caller |
9| delegator(1, 2) # CI1 (argc: 2) |
10| end |
```
The CI at `delegatee` on line 5 is tagged as "FORWARDING", so it knows to
memcopy the caller's stack before calling `delegatee`. In this case, it will
memcopy self, 1, and 2 to the stack before calling `delegatee`. It knows how much
memory to copy from the caller because `CI1` contains stack size information
(argc: 2).
Before executing the `send` instruction, we push `...` on the stack. The
`send` instruction pops `...`, and because it is tagged with `FORWARDING`, it
knows to memcopy (using the information in the CI it just popped):
```
== disasm: #<ISeq:delegator@-e:1 (1,0)-(1,39)>
local table (size: 1, argc: 0 [opts: 0, rest: -1, post: 0, block: -1, kw: -1@-1, kwrest: -1])
[ 1] "..."@0
0000 putself ( 1)[LiCa]
0001 getlocal_WC_0 "..."@0
0003 send <calldata!mid:delegatee, argc:0, FCALL|FORWARDING>, nil
0006 leave [Re]
```
Instruction 001 puts the caller's CI on the stack. `send` is tagged with
FORWARDING, so it reads the CI and _copies_ the callers stack to this stack:
```
Executing Line | Code | Stack
---------------+---------------------------------------+--------
1| def delegatee(a, b) = a + b | self
2| | 1
3| def delegator(...) | 2
4| # | CI1 (argc: 2)
-> 5| delegatee(...) # CI2 (FORWARDING) | cref_or_me
6| end | specval
7| | type
8| def caller | self
9| delegator(1, 2) # CI1 (argc: 2) | 1
10| end | 2
```
The "FORWARDING" call site combines information from CI1 with CI2 in order
to support passing other values in addition to the `...` value, as well as
perfectly forward splat args, kwargs, etc.
Since we're able to copy the stack from `caller` in to `delegator`'s stack, we
can avoid allocating objects.
I want to do this to eliminate object allocations for delegate methods.
My long term goal is to implement `Class#new` in Ruby and it uses `...`.
I was able to implement `Class#new` in Ruby
[here](https://github.com/ruby/ruby/pull/9289).
If we adopt the technique in this patch, then we can optimize allocating
objects that take keyword parameters for `initialize`.
For example, this code will allocate 2 objects: one for `SomeObject`, and one
for the kwargs:
```ruby
SomeObject.new(foo: 1)
```
If we combine this technique, plus implement `Class#new` in Ruby, then we can
reduce allocations for this common operation.
Co-Authored-By: John Hawthorn <john@hawthorn.email>
Co-Authored-By: Alan Wu <XrXr@users.noreply.github.com>
2024-04-15 20:48:53 +03:00
|
|
|
unsigned int forwardable: 1;
|
2014-11-03 02:14:21 +03:00
|
|
|
} flags;
|
2022-07-21 19:23:58 +03:00
|
|
|
|
2015-07-25 00:44:14 +03:00
|
|
|
unsigned int size;
|
2022-07-21 19:23:58 +03:00
|
|
|
|
2014-11-03 02:14:21 +03:00
|
|
|
int lead_num;
|
|
|
|
int opt_num;
|
|
|
|
int rest_start;
|
|
|
|
int post_start;
|
|
|
|
int post_num;
|
|
|
|
int block_start;
|
2022-07-21 19:23:58 +03:00
|
|
|
|
2015-07-24 23:58:09 +03:00
|
|
|
const VALUE *opt_table; /* (opt_num + 1) entries. */
|
2014-11-03 07:43:07 +03:00
|
|
|
/* opt_num and opt_table:
|
|
|
|
*
|
|
|
|
* def foo o1=e1, o2=e2, ..., oN=eN
|
|
|
|
* #=>
|
|
|
|
* # prologue code
|
|
|
|
* A1: e1
|
|
|
|
* A2: e2
|
|
|
|
* ...
|
|
|
|
* AN: eN
|
|
|
|
* AL: body
|
|
|
|
* opt_num = N
|
|
|
|
* opt_table = [A1, A2, ..., AN, AL]
|
|
|
|
*/
|
2022-07-21 19:23:58 +03:00
|
|
|
|
2015-07-24 23:58:09 +03:00
|
|
|
const struct rb_iseq_param_keyword {
|
2014-11-03 02:14:21 +03:00
|
|
|
int num;
|
|
|
|
int required_num;
|
|
|
|
int bits_start;
|
|
|
|
int rest_start;
|
2015-07-23 12:53:16 +03:00
|
|
|
const ID *table;
|
2019-04-20 04:19:47 +03:00
|
|
|
VALUE *default_values;
|
2014-11-03 02:14:21 +03:00
|
|
|
} *keyword;
|
|
|
|
} param;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2015-07-22 14:21:21 +03:00
|
|
|
rb_iseq_location_t location;
|
|
|
|
|
|
|
|
/* insn info, must be freed */
|
2018-01-01 12:16:27 +03:00
|
|
|
struct iseq_insn_info {
|
|
|
|
const struct iseq_insn_info_entry *body;
|
2018-01-09 17:05:23 +03:00
|
|
|
unsigned int *positions;
|
2018-01-01 12:16:27 +03:00
|
|
|
unsigned int size;
|
2018-01-09 17:05:23 +03:00
|
|
|
#if VM_INSN_INFO_TABLE_IMPL == 2
|
|
|
|
struct succ_index_table *succ_index_table;
|
|
|
|
#endif
|
2018-01-01 12:16:27 +03:00
|
|
|
} insns_info;
|
2015-07-22 14:21:21 +03:00
|
|
|
|
2015-07-23 12:53:16 +03:00
|
|
|
const ID *local_table; /* must free */
|
2015-07-22 14:21:21 +03:00
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
/* catch table */
|
2019-04-20 04:19:47 +03:00
|
|
|
struct iseq_catch_table *catch_table;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
/* for child iseq */
|
2015-07-16 16:13:50 +03:00
|
|
|
const struct rb_iseq_struct *parent_iseq;
|
|
|
|
struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
|
2015-07-22 14:21:21 +03:00
|
|
|
|
2022-07-18 22:38:12 +03:00
|
|
|
union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
|
VALUE size packed callinfo (ci).
Now, rb_call_info contains how to call the method with tuple of
(mid, orig_argc, flags, kwarg). Most of cases, kwarg == NULL and
mid+argc+flags only requires 64bits. So this patch packed
rb_call_info to VALUE (1 word) on such cases. If we can not
represent it in VALUE, then use imemo_callinfo which contains
conventional callinfo (rb_callinfo, renamed from rb_call_info).
iseq->body->ci_kw_size is removed because all of callinfo is VALUE
size (packed ci or a pointer to imemo_callinfo).
To access ci information, we need to use these functions:
vm_ci_mid(ci), _flag(ci), _argc(ci), _kwarg(ci).
struct rb_call_info_kw_arg is renamed to rb_callinfo_kwarg.
rb_funcallv_with_cc() and rb_method_basic_definition_p_with_cc()
is temporary removed because cd->ci should be marked.
2020-01-08 02:20:36 +03:00
|
|
|
struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
|
2015-09-19 20:59:58 +03:00
|
|
|
|
2018-03-19 21:21:54 +03:00
|
|
|
struct {
|
2018-04-20 03:25:30 +03:00
|
|
|
rb_snum_t flip_count;
|
2021-09-30 10:58:46 +03:00
|
|
|
VALUE script_lines;
|
2018-04-20 03:25:30 +03:00
|
|
|
VALUE coverage;
|
2018-10-20 13:45:55 +03:00
|
|
|
VALUE pc2branchindex;
|
2018-05-09 08:42:06 +03:00
|
|
|
VALUE *original_iseq;
|
2018-03-19 21:21:54 +03:00
|
|
|
} variable;
|
2015-07-22 14:21:21 +03:00
|
|
|
|
2015-07-25 00:44:14 +03:00
|
|
|
unsigned int local_table_size;
|
2022-07-18 22:38:12 +03:00
|
|
|
unsigned int ic_size; // Number of IC caches
|
|
|
|
unsigned int ise_size; // Number of ISE caches
|
|
|
|
unsigned int ivc_size; // Number of IVC caches
|
|
|
|
unsigned int icvarc_size; // Number of ICVARC caches
|
2015-09-19 20:59:58 +03:00
|
|
|
unsigned int ci_size;
|
2016-07-28 14:02:30 +03:00
|
|
|
unsigned int stack_max; /* for stack overflow check */
|
mjit.c: merge MJIT infrastructure
that allows to JIT-compile Ruby methods by generating C code and
using C compiler. See the first comment of mjit.c to know what this
file does.
mjit.c is authored by Vladimir Makarov <vmakarov@redhat.com>.
After he invented great method JIT infrastructure for MRI as MJIT,
Lars Kanis <lars@greiz-reinsdorf.de> sent the patch to support MinGW
in MJIT. In addition to merging it, I ported pthread to Windows native
threads. Now this MJIT infrastructure can be compiled on Visual Studio.
This commit simplifies mjit.c to decrease code at initial merge. For
example, this commit does not provide multiple JIT threads support.
We can resurrect them later if we really want them, but I wanted to minimize
diff to make it easier to review this patch.
`/tmp/_mjitXXX` file is renamed to `/tmp/_ruby_mjitXXX` because non-Ruby
developers may not know the name "mjit" and the file name should make
sure it's from Ruby and not from some harmful programs. TODO: it may be
better to store this to some temporary directory which Ruby is already using
by Tempfile, if it's not bad for performance.
mjit.h: New. It has `mjit_exec` interface similar to `vm_exec`, which is
for triggering MJIT. This drops interface for AOT compared to the original
MJIT.
Makefile.in: define macros to let MJIT know the path of MJIT header.
Probably we can refactor this to reduce the number of macros (TODO).
win32/Makefile.sub: ditto.
common.mk: compile mjit.o and mjit_compile.o. Unlike original MJIT, this
commit separates MJIT infrastructure and JIT compiler code as independent
object files. As initial patch is NOT going to have ultra-fast JIT compiler,
it's likely to replace JIT compiler, e.g. original MJIT's compiler or some
future JIT implementations which are not public now.
inits.c: define MJIT module. This is added because `MJIT.enabled?` was
necessary for testing.
test/lib/zombie_hunter.rb: skip if `MJIT.enabled?`. Obviously this
wouldn't work with current code when JIT is enabled.
test/ruby/test_io.rb: skip this too. This would make no sense with MJIT.
ruby.c: define MJIT CLI options. As major difference from original MJIT,
"-j:l"/"--jit:llvm" are renamed to "--jit-cc" because I want to support
not only gcc/clang but also cl.exe (Visual Studio) in the future. But it
takes only "--jit-cc=gcc", "--jit-cc=clang" for now. And only long "--jit"
options are allowed since some Ruby committers preferred it at Ruby
developers Meeting on January, and some of options are renamed.
This file also triggers to initialize MJIT thread and variables.
eval.c: finalize MJIT worker thread and variables.
test/ruby/test_rubyoptions.rb: fix number of CLI options for --jit.
thread_pthread.c: change for pthread abstraction in MJIT. Prefix rb_ for
functions which are used by other files.
thread_win32.c: ditto, for Windows. Those pthread porting is one of major
works that YARV-MJIT created, which is my fork of MJIT, in Feature 14235.
thread.c: follow rb_ prefix changes
vm.c: trigger MJIT call on VM invocation. Also trigger `mjit_mark` to avoid
SEGV by race between JIT and GC of ISeq. The improvement was provided by
wanabe <s.wanabe@gmail.com>.
In JIT compiler I created and am going to add in my next commit, I found
that having `mjit_exec` after `vm_loop_start:` is harmful because the
JIT-ed function doesn't proceed other ISeqs on RESTORE_REGS of leave insn.
Executing non-FINISH frame is unexpected for my JIT compiler and
`exception_handler` triggers executions of such ISeqs. So `mjit_exec`
here should be executed only when it directly comes from `vm_exec` call.
`RubyVM::MJIT` module and `.enabled?` method is added so that we can skip
some tests which don't expect JIT threads or compiler file descriptors.
vm_insnhelper.h: trigger MJIT on method calls during VM execution.
vm_core.h: add fields required for mjit.c. `bp` must be `cfp[6]` because
rb_control_frame_struct is likely to be casted to another struct. The
last position is the safest place to add the new field.
vm_insnhelper.c: save initial value of cfp->ep as cfp->bp. This is an
optimization which are done in both MJIT and YARV-MJIT. So this change
is added in this commit. Calculating bp from ep is a little heavy work,
so bp is kind of cache for it.
iseq.c: notify ISeq GC to MJIT. We should know which iseq in MJIT queue
is GCed to avoid SEGV. TODO: unload some GCed units in some safe way.
gc.c: add hooks so that MJIT can wait GC, and vice versa. Simultaneous
JIT and GC executions may cause SEGV and so we should synchronize them.
cont.c: save continuation information in MJIT worker. As MJIT shouldn't
unload JIT-ed code which is being used, MJIT wants to know full list of
saved execution contexts for continuation and detect ISeqs in use.
mjit_compile.c: added empty JIT compiler so that you can reuse this commit
to build your own JIT compiler. This commit tries to compile ISeqs but
all of them are considered as not supported in this commit. So you can't
use JIT compiler in this commit yet while we added --jit option now.
Patch author: Vladimir Makarov <vmakarov@redhat.com>.
Contributors:
Takashi Kokubun <takashikkbn@gmail.com>.
wanabe <s.wanabe@gmail.com>.
Lars Kanis <lars@greiz-reinsdorf.de>.
Part of Feature 12589 and 14235.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62189 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2018-02-04 09:58:09 +03:00
|
|
|
|
2023-03-12 00:32:58 +03:00
|
|
|
unsigned int builtin_attrs; // Union of rb_builtin_attr
|
2022-12-01 22:51:18 +03:00
|
|
|
|
2024-02-12 22:40:07 +03:00
|
|
|
bool prism; // ISEQ was generated from prism compiler
|
|
|
|
|
2022-12-01 22:51:18 +03:00
|
|
|
union {
|
|
|
|
iseq_bits_t * list; /* Find references for GC */
|
|
|
|
iseq_bits_t single;
|
|
|
|
} mark_bits;
|
|
|
|
|
2020-10-23 07:27:21 +03:00
|
|
|
struct rb_id_table *outer_variables;
|
2019-03-18 04:42:20 +03:00
|
|
|
|
`Primitive.mandatory_only?` for fast path
Compare with the C methods, A built-in methods written in Ruby is
slower if only mandatory parameters are given because it needs to
check the argumens and fill default values for optional and keyword
parameters (C methods can check the number of parameters with `argc`,
so there are no overhead). Passing mandatory arguments are common
(optional arguments are exceptional, in many cases) so it is important
to provide the fast path for such common cases.
`Primitive.mandatory_only?` is a special builtin function used with
`if` expression like that:
```ruby
def self.at(time, subsec = false, unit = :microsecond, in: nil)
if Primitive.mandatory_only?
Primitive.time_s_at1(time)
else
Primitive.time_s_at(time, subsec, unit, Primitive.arg!(:in))
end
end
```
and it makes two ISeq,
```
def self.at(time, subsec = false, unit = :microsecond, in: nil)
Primitive.time_s_at(time, subsec, unit, Primitive.arg!(:in))
end
def self.at(time)
Primitive.time_s_at1(time)
end
```
and (2) is pointed by (1). Note that `Primitive.mandatory_only?`
should be used only in a condition of an `if` statement and the
`if` statement should be equal to the method body (you can not
put any expression before and after the `if` statement).
A method entry with `mandatory_only?` (`Time.at` on the above case)
is marked as `iseq_overload`. When the method will be dispatch only
with mandatory arguments (`Time.at(0)` for example), make another
method entry with ISeq (2) as mandatory only method entry and it
will be cached in an inline method cache.
The idea is similar discussed in https://bugs.ruby-lang.org/issues/16254
but it only checks mandatory parameters or more, because many cases
only mandatory parameters are given. If we find other cases (optional
or keyword parameters are used frequently and it hurts performance),
we can extend the feature.
2021-11-12 20:12:20 +03:00
|
|
|
const rb_iseq_t *mandatory_only_iseq;
|
|
|
|
|
2023-03-07 10:15:30 +03:00
|
|
|
#if USE_RJIT || USE_YJIT
|
2023-08-09 02:06:22 +03:00
|
|
|
// Function pointer for JIT code on jit_exec()
|
|
|
|
rb_jit_func_t jit_entry;
|
|
|
|
// Number of calls on jit_exec()
|
|
|
|
long unsigned jit_entry_calls;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if USE_YJIT
|
|
|
|
// Function pointer for JIT code on jit_exec_exception()
|
|
|
|
rb_jit_func_t jit_exception;
|
|
|
|
// Number of calls on jit_exec_exception()
|
|
|
|
long unsigned jit_exception_calls;
|
2022-12-09 10:41:12 +03:00
|
|
|
#endif
|
2022-12-09 10:48:30 +03:00
|
|
|
|
2023-03-07 10:15:30 +03:00
|
|
|
#if USE_RJIT
|
|
|
|
// RJIT stores some data on each iseq.
|
2023-03-07 10:17:25 +03:00
|
|
|
VALUE rjit_blocks;
|
2018-10-20 09:53:00 +03:00
|
|
|
#endif
|
2021-02-13 01:12:18 +03:00
|
|
|
|
Rust YJIT
In December 2021, we opened an [issue] to solicit feedback regarding the
porting of the YJIT codebase from C99 to Rust. There were some
reservations, but this project was given the go ahead by Ruby core
developers and Matz. Since then, we have successfully completed the port
of YJIT to Rust.
The new Rust version of YJIT has reached parity with the C version, in
that it passes all the CRuby tests, is able to run all of the YJIT
benchmarks, and performs similarly to the C version (because it works
the same way and largely generates the same machine code). We've even
incorporated some design improvements, such as a more fine-grained
constant invalidation mechanism which we expect will make a big
difference in Ruby on Rails applications.
Because we want to be careful, YJIT is guarded behind a configure
option:
```shell
./configure --enable-yjit # Build YJIT in release mode
./configure --enable-yjit=dev # Build YJIT in dev/debug mode
```
By default, YJIT does not get compiled and cargo/rustc is not required.
If YJIT is built in dev mode, then `cargo` is used to fetch development
dependencies, but when building in release, `cargo` is not required,
only `rustc`. At the moment YJIT requires Rust 1.60.0 or newer.
The YJIT command-line options remain mostly unchanged, and more details
about the build process are documented in `doc/yjit/yjit.md`.
The CI tests have been updated and do not take any more resources than
before.
The development history of the Rust port is available at the following
commit for interested parties:
https://github.com/Shopify/ruby/commit/1fd9573d8b4b65219f1c2407f30a0a60e537f8be
Our hope is that Rust YJIT will be compiled and included as a part of
system packages and compiled binaries of the Ruby 3.2 release. We do not
anticipate any major problems as Rust is well supported on every
platform which YJIT supports, but to make sure that this process works
smoothly, we would like to reach out to those who take care of building
systems packages before the 3.2 release is shipped and resolve any
issues that may come up.
[issue]: https://bugs.ruby-lang.org/issues/18481
Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
Co-authored-by: Noah Gibbs <the.codefolio.guy@gmail.com>
Co-authored-by: Kevin Newton <kddnewton@gmail.com>
2022-04-19 21:40:21 +03:00
|
|
|
#if USE_YJIT
|
|
|
|
// YJIT stores some data on each iseq.
|
|
|
|
void *yjit_payload;
|
2023-10-12 17:05:34 +03:00
|
|
|
// Used to estimate how frequently this ISEQ gets called
|
|
|
|
uint64_t yjit_calls_at_interv;
|
Rust YJIT
In December 2021, we opened an [issue] to solicit feedback regarding the
porting of the YJIT codebase from C99 to Rust. There were some
reservations, but this project was given the go ahead by Ruby core
developers and Matz. Since then, we have successfully completed the port
of YJIT to Rust.
The new Rust version of YJIT has reached parity with the C version, in
that it passes all the CRuby tests, is able to run all of the YJIT
benchmarks, and performs similarly to the C version (because it works
the same way and largely generates the same machine code). We've even
incorporated some design improvements, such as a more fine-grained
constant invalidation mechanism which we expect will make a big
difference in Ruby on Rails applications.
Because we want to be careful, YJIT is guarded behind a configure
option:
```shell
./configure --enable-yjit # Build YJIT in release mode
./configure --enable-yjit=dev # Build YJIT in dev/debug mode
```
By default, YJIT does not get compiled and cargo/rustc is not required.
If YJIT is built in dev mode, then `cargo` is used to fetch development
dependencies, but when building in release, `cargo` is not required,
only `rustc`. At the moment YJIT requires Rust 1.60.0 or newer.
The YJIT command-line options remain mostly unchanged, and more details
about the build process are documented in `doc/yjit/yjit.md`.
The CI tests have been updated and do not take any more resources than
before.
The development history of the Rust port is available at the following
commit for interested parties:
https://github.com/Shopify/ruby/commit/1fd9573d8b4b65219f1c2407f30a0a60e537f8be
Our hope is that Rust YJIT will be compiled and included as a part of
system packages and compiled binaries of the Ruby 3.2 release. We do not
anticipate any major problems as Rust is well supported on every
platform which YJIT supports, but to make sure that this process works
smoothly, we would like to reach out to those who take care of building
systems packages before the 3.2 release is shipped and resolve any
issues that may come up.
[issue]: https://bugs.ruby-lang.org/issues/18481
Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
Co-authored-by: Noah Gibbs <the.codefolio.guy@gmail.com>
Co-authored-by: Kevin Newton <kddnewton@gmail.com>
2022-04-19 21:40:21 +03:00
|
|
|
#endif
|
2015-07-22 13:55:02 +03:00
|
|
|
};
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2015-07-22 01:52:59 +03:00
|
|
|
/* T_IMEMO/iseq */
|
|
|
|
/* typedef rb_iseq_t is in method.h */
|
|
|
|
struct rb_iseq_struct {
|
2018-12-06 13:52:27 +03:00
|
|
|
VALUE flags; /* 1 */
|
|
|
|
VALUE wrapper; /* 2 */
|
|
|
|
|
|
|
|
struct rb_iseq_constant_body *body; /* 3 */
|
2015-12-08 16:58:50 +03:00
|
|
|
|
|
|
|
union { /* 4, 5 words */
|
|
|
|
struct iseq_compile_data *compile_data; /* used at compile time */
|
|
|
|
|
|
|
|
struct {
|
|
|
|
VALUE obj;
|
|
|
|
int index;
|
|
|
|
} loader;
|
2017-11-18 12:39:41 +03:00
|
|
|
|
2018-12-06 13:52:27 +03:00
|
|
|
struct {
|
|
|
|
struct rb_hook_list_struct *local_hooks;
|
|
|
|
rb_event_flag_t global_trace_events;
|
|
|
|
} exec;
|
2015-12-08 16:58:50 +03:00
|
|
|
} aux;
|
2015-07-22 01:52:59 +03:00
|
|
|
};
|
|
|
|
|
2022-03-23 22:19:48 +03:00
|
|
|
#define ISEQ_BODY(iseq) ((iseq)->body)
|
|
|
|
|
2023-06-30 11:52:21 +03:00
|
|
|
#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
|
2015-12-18 10:13:16 +03:00
|
|
|
#define USE_LAZY_LOAD 0
|
2015-12-08 16:58:50 +03:00
|
|
|
#endif
|
|
|
|
|
2023-07-01 09:14:27 +03:00
|
|
|
#if !USE_LAZY_LOAD
|
|
|
|
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
|
|
|
|
#endif
|
2015-12-08 16:58:50 +03:00
|
|
|
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
|
|
|
|
|
|
|
|
static inline const rb_iseq_t *
|
|
|
|
rb_iseq_check(const rb_iseq_t *iseq)
|
|
|
|
{
|
2023-06-30 11:52:21 +03:00
|
|
|
if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
|
2015-12-08 16:58:50 +03:00
|
|
|
rb_iseq_complete((rb_iseq_t *)iseq);
|
|
|
|
}
|
|
|
|
return iseq;
|
|
|
|
}
|
|
|
|
|
2018-10-30 17:57:03 +03:00
|
|
|
static inline const rb_iseq_t *
|
2019-10-03 06:26:41 +03:00
|
|
|
def_iseq_ptr(rb_method_definition_t *def)
|
2018-10-30 17:57:03 +03:00
|
|
|
{
|
2019-10-25 11:58:54 +03:00
|
|
|
//TODO: re-visit. to check the bug, enable this assertion.
|
2019-12-17 14:22:00 +03:00
|
|
|
#if VM_CHECK_MODE > 0
|
2018-10-30 17:57:03 +03:00
|
|
|
if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
|
|
|
|
#endif
|
|
|
|
return rb_iseq_check(def->body.iseq.iseqptr);
|
|
|
|
}
|
|
|
|
|
2008-06-15 13:17:06 +04:00
|
|
|
enum ruby_special_exceptions {
|
|
|
|
ruby_error_reenter,
|
|
|
|
ruby_error_nomemory,
|
|
|
|
ruby_error_sysstack,
|
2017-08-21 09:46:46 +03:00
|
|
|
ruby_error_stackfatal,
|
2017-04-09 05:34:49 +03:00
|
|
|
ruby_error_stream_closed,
|
2008-06-15 13:17:06 +04:00
|
|
|
ruby_special_error_count
|
|
|
|
};
|
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
#define GetVMPtr(obj, ptr) \
|
2011-01-21 18:54:58 +03:00
|
|
|
GetCoreDataFromValue((obj), rb_vm_t, (ptr))
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2016-04-04 17:37:07 +03:00
|
|
|
struct rb_vm_struct;
|
|
|
|
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
|
|
|
|
|
|
|
|
typedef struct rb_at_exit_list {
|
|
|
|
rb_vm_at_exit_func *func;
|
|
|
|
struct rb_at_exit_list *next;
|
|
|
|
} rb_at_exit_list;
|
|
|
|
|
2024-05-03 19:00:24 +03:00
|
|
|
void *rb_objspace_alloc(void);
|
|
|
|
void rb_objspace_free(void *objspace);
|
|
|
|
void rb_objspace_call_finalizer(void);
|
2009-09-18 11:29:17 +04:00
|
|
|
|
2012-08-16 15:41:24 +04:00
|
|
|
typedef struct rb_hook_list_struct {
|
|
|
|
struct rb_event_hook_struct *hooks;
|
|
|
|
rb_event_flag_t events;
|
2018-11-26 21:16:39 +03:00
|
|
|
unsigned int running;
|
2021-12-12 20:15:05 +03:00
|
|
|
bool need_clean;
|
|
|
|
bool is_local;
|
2012-08-16 15:41:24 +04:00
|
|
|
} rb_hook_list_t;
|
|
|
|
|
2019-11-07 10:58:00 +03:00
|
|
|
|
|
|
|
// see builtin.h for definition
|
|
|
|
typedef const struct rb_builtin_function *RB_BUILTIN;
|
|
|
|
|
2024-03-13 20:58:03 +03:00
|
|
|
struct global_object_list {
|
|
|
|
VALUE *varptr;
|
|
|
|
struct global_object_list *next;
|
|
|
|
};
|
|
|
|
|
2008-10-22 00:59:23 +04:00
|
|
|
typedef struct rb_vm_struct {
|
2006-12-31 18:02:22 +03:00
|
|
|
VALUE self;
|
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
struct {
|
2022-03-30 10:36:31 +03:00
|
|
|
struct ccan_list_head set;
|
2020-03-09 20:22:11 +03:00
|
|
|
unsigned int cnt;
|
|
|
|
unsigned int blocking_cnt;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
struct rb_ractor_struct *main_ractor;
|
|
|
|
struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
|
2018-08-14 00:34:20 +03:00
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
struct {
|
|
|
|
// monitor
|
|
|
|
rb_nativethread_lock_t lock;
|
|
|
|
struct rb_ractor_struct *lock_owner;
|
|
|
|
unsigned int lock_rec;
|
|
|
|
|
|
|
|
// join at exit
|
|
|
|
rb_nativethread_cond_t terminate_cond;
|
|
|
|
bool terminate_waiting;
|
2023-04-10 04:53:13 +03:00
|
|
|
|
|
|
|
#ifndef RUBY_THREAD_PTHREAD_H
|
|
|
|
bool barrier_waiting;
|
|
|
|
unsigned int barrier_cnt;
|
|
|
|
rb_nativethread_cond_t barrier_cond;
|
|
|
|
#endif
|
2020-03-09 20:22:11 +03:00
|
|
|
} sync;
|
2023-04-10 04:53:13 +03:00
|
|
|
|
|
|
|
// ractor scheduling
|
|
|
|
struct {
|
|
|
|
rb_nativethread_lock_t lock;
|
|
|
|
struct rb_ractor_struct *lock_owner;
|
|
|
|
bool locked;
|
|
|
|
|
|
|
|
rb_nativethread_cond_t cond; // GRQ
|
|
|
|
unsigned int snt_cnt; // count of shared NTs
|
|
|
|
unsigned int dnt_cnt; // count of dedicated NTs
|
|
|
|
|
|
|
|
unsigned int running_cnt;
|
|
|
|
|
|
|
|
unsigned int max_cpu;
|
|
|
|
struct ccan_list_head grq; // // Global Ready Queue
|
|
|
|
unsigned int grq_cnt;
|
|
|
|
|
|
|
|
// running threads
|
|
|
|
struct ccan_list_head running_threads;
|
|
|
|
|
|
|
|
// threads which switch context by timeslice
|
|
|
|
struct ccan_list_head timeslice_threads;
|
|
|
|
|
|
|
|
struct ccan_list_head zombie_threads;
|
|
|
|
|
|
|
|
// true if timeslice timer is not enable
|
|
|
|
bool timeslice_wait_inf;
|
|
|
|
|
|
|
|
// barrier
|
|
|
|
rb_nativethread_cond_t barrier_complete_cond;
|
|
|
|
rb_nativethread_cond_t barrier_release_cond;
|
|
|
|
bool barrier_waiting;
|
|
|
|
unsigned int barrier_waiting_cnt;
|
|
|
|
unsigned int barrier_serial;
|
|
|
|
} sched;
|
2020-03-09 20:22:11 +03:00
|
|
|
} ractor;
|
2018-08-14 00:34:20 +03:00
|
|
|
|
2018-04-21 00:38:27 +03:00
|
|
|
#ifdef USE_SIGALTSTACK
|
|
|
|
void *main_altstack;
|
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2018-04-20 06:22:26 +03:00
|
|
|
rb_serial_t fork_gen;
|
2022-03-30 10:36:31 +03:00
|
|
|
struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2019-01-04 16:14:11 +03:00
|
|
|
/* set in single-threaded processes only: */
|
|
|
|
volatile int ubf_async_safe;
|
|
|
|
|
2016-05-17 20:24:34 +03:00
|
|
|
unsigned int running: 1;
|
|
|
|
unsigned int thread_abort_on_exception: 1;
|
2016-06-06 03:25:38 +03:00
|
|
|
unsigned int thread_report_on_exception: 1;
|
2020-10-29 01:27:00 +03:00
|
|
|
unsigned int thread_ignore_deadlock: 1;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
/* object management */
|
|
|
|
VALUE mark_object_ary;
|
2024-03-14 20:52:20 +03:00
|
|
|
struct global_object_list *global_object_list;
|
2014-09-11 14:53:48 +04:00
|
|
|
const VALUE special_exceptions[ruby_special_error_count];
|
2008-06-15 13:17:06 +04:00
|
|
|
|
2007-02-14 05:19:02 +03:00
|
|
|
/* load */
|
* blockinlining.c: remove "yarv" prefix.
* array.c, numeric.c: ditto.
* insnhelper.ci, insns.def, vm_evalbody.ci: ditto.
* yarvcore.c: removed.
* yarvcore.h: renamed to core.h.
* cont.c, debug.c, error.c, process.c, signal.c : ditto.
* ext/probeprofiler/probeprofiler.c: ditto.
* id.c, id.h: added.
* inits.c: ditto.
* compile.c: rename internal functions.
* compile.h: fix debug flag.
* eval.c, object.c, vm.c: remove ruby_top_self.
use rb_vm_top_self() instead.
* eval_intern.h, eval_load: ditto.
* gc.c: rename yarv_machine_stack_mark() to
rb_gc_mark_machine_stack().
* insnhelper.h: remove unused macros.
* iseq.c: add iseq_compile() to create iseq object
from source string.
* proc.c: rename a internal function.
* template/insns.inc.tmpl: remove YARV prefix.
* thread.c:
* vm.c (rb_iseq_eval): added.
* vm.c: move some functions from yarvcore.c.
* vm_dump.c: fix to remove compiler warning.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12741 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-07-12 08:25:46 +04:00
|
|
|
VALUE top_self;
|
2008-04-30 13:03:03 +04:00
|
|
|
VALUE load_path;
|
2012-11-05 19:27:05 +04:00
|
|
|
VALUE load_path_snapshot;
|
2012-11-05 19:27:08 +04:00
|
|
|
VALUE load_path_check_cache;
|
2012-11-05 19:27:05 +04:00
|
|
|
VALUE expanded_load_path;
|
2007-02-14 05:19:02 +03:00
|
|
|
VALUE loaded_features;
|
2012-11-05 19:27:01 +04:00
|
|
|
VALUE loaded_features_snapshot;
|
Do not load file with same realpath twice when requiring
This fixes issues with paths being loaded twice in certain cases
when symlinks are used.
It took me multiple attempts to get this working. My original
attempt tried to convert paths to realpaths before adding them
to $LOADED_FEATURES. Unfortunately, this doesn't work well
with the loaded feature index, which is based off load paths
and not realpaths. While I was able to get require working, I'm
fairly sure the loaded feature index was not being used as
expected, which would have significant performance implications.
Additionally, I was never able to get that approach working with
autoload when autoloading a non-realpath file. It also broke
some specs.
This takes a more conservative approach. Directly before loading the
file, if the file with the same realpath has been required, the
loading of the file is skipped. The realpaths are stored as
fstrings in a hidden hash.
When rebuilding the loaded feature index, the hash of realpaths
is also rebuilt. I'm guessing this makes rebuilding process
slower, but I don't think that is a hot path. In general, modifying
loaded features is only done when reloading, and that tends to be
in non-production environments.
Change test_require_with_loaded_features_pop test to use 30 threads
and 300 iterations, instead of 4 threads and 1000 iterations.
I saw only sporadic failures with 4/1000, but consistent failures
30/300 threads. These failures were due to the fact that the
concurrent deletions from $LOADED_FEATURES in other threads can
result in rb_ary_entry returning nil when rebuilding the loaded
features index.
To avoid concurrency issues when rebuilding the loaded features
index, the building of the index itself is left alone, and
afterwards, a separate loop is done on a copy of the loaded feature
snapshot in order to rebuild the realpaths hash.
Fixes [Bug #17885]
2021-06-30 23:50:19 +03:00
|
|
|
VALUE loaded_features_realpaths;
|
2023-04-12 22:33:16 +03:00
|
|
|
VALUE loaded_features_realpath_map;
|
2013-03-22 12:38:51 +04:00
|
|
|
struct st_table *loaded_features_index;
|
2007-05-03 17:19:11 +04:00
|
|
|
struct st_table *loading_table;
|
2022-11-18 01:33:18 +03:00
|
|
|
// For running the init function of statically linked
|
|
|
|
// extensions when they are loaded
|
|
|
|
struct st_table *static_ext_inits;
|
2008-12-09 10:17:10 +03:00
|
|
|
|
2007-02-14 05:19:02 +03:00
|
|
|
/* signal */
|
2014-07-16 14:16:34 +04:00
|
|
|
struct {
|
2017-07-27 15:17:56 +03:00
|
|
|
VALUE cmd[RUBY_NSIG];
|
|
|
|
} trap_list;
|
2007-04-19 14:37:08 +04:00
|
|
|
|
2013-11-15 21:15:31 +04:00
|
|
|
/* relation table of ensure - rollback for callcc */
|
|
|
|
struct st_table *ensure_rollback_table;
|
|
|
|
|
Change the semantics of rb_postponed_job_register
Our current implementation of rb_postponed_job_register suffers from
some safety issues that can lead to interpreter crashes (see bug #1991).
Essentially, the issue is that jobs can be called with the wrong
arguments.
We made two attempts to fix this whilst keeping the promised semantics,
but:
* The first one involved masking/unmasking when flushing jobs, which
was believed to be too expensive
* The second one involved a lock-free, multi-producer, single-consumer
ringbuffer, which was too complex
The critical insight behind this third solution is that essentially the
only user of these APIs are a) internal, or b) profiling gems.
For a), none of the usages actually require variable data; they will
work just fine with the preregistration interface.
For b), generally profiling gems only call a single callback with a
single piece of data (which is actually usually just zero) for the life
of the program. The ringbuffer is complex because it needs to support
multi-word inserts of job & data (which can't be atomic); but nobody
actually even needs that functionality, really.
So, this commit:
* Introduces a pre-registration API for jobs, with a GVL-requiring
rb_postponed_job_preregister, which returns a handle which can be
used with an async-signal-safe rb_postponed_job_trigger.
* Deprecates rb_postponed_job_register (and re-implements it on top of
the preregister function for compatibility)
* Moves all the internal usages of postponed job register
pre-registration
2023-11-19 14:54:57 +03:00
|
|
|
/* postponed_job (async-signal-safe, and thread-safe) */
|
|
|
|
struct rb_postponed_job_queue *postponed_job_queue;
|
2013-05-27 01:30:44 +04:00
|
|
|
|
2008-06-09 08:20:07 +04:00
|
|
|
int src_encoding_index;
|
|
|
|
|
2018-11-30 06:56:29 +03:00
|
|
|
/* workqueue (thread-safe, NOT async-signal-safe) */
|
2022-03-30 10:36:31 +03:00
|
|
|
struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
|
2018-11-30 06:56:29 +03:00
|
|
|
rb_nativethread_lock_t workqueue_lock;
|
|
|
|
|
Some global variables can be accessed from ractors
Some global variables should be used from non-main Ractors.
[Bug #17268]
```ruby
# ractor-local (derived from created ractor): debug
'$DEBUG' => $DEBUG,
'$-d' => $-d,
# ractor-local (derived from created ractor): verbose
'$VERBOSE' => $VERBOSE,
'$-w' => $-w,
'$-W' => $-W,
'$-v' => $-v,
# process-local (readonly): other commandline parameters
'$-p' => $-p,
'$-l' => $-l,
'$-a' => $-a,
# process-local (readonly): getpid
'$$' => $$,
# thread local: process result
'$?' => $?,
# scope local: match
'$~' => $~.inspect,
'$&' => $&,
'$`' => $`,
'$\'' => $',
'$+' => $+,
'$1' => $1,
# scope local: last line
'$_' => $_,
# scope local: last backtrace
'$@' => $@,
'$!' => $!,
# ractor local: stdin, out, err
'$stdin' => $stdin.inspect,
'$stdout' => $stdout.inspect,
'$stderr' => $stderr.inspect,
```
2020-10-20 04:46:43 +03:00
|
|
|
VALUE orig_progname, progname;
|
2021-10-25 14:00:51 +03:00
|
|
|
VALUE coverages, me2counter;
|
2017-09-03 17:26:06 +03:00
|
|
|
int coverage_mode;
|
2008-06-09 09:18:03 +04:00
|
|
|
|
2008-04-27 07:20:35 +04:00
|
|
|
struct rb_objspace *objspace;
|
2010-12-02 14:06:32 +03:00
|
|
|
|
2016-04-04 17:37:07 +03:00
|
|
|
rb_at_exit_list *at_exit;
|
2012-09-24 12:36:53 +04:00
|
|
|
|
2014-08-29 10:30:03 +04:00
|
|
|
st_table *frozen_strings;
|
2012-12-20 02:29:18 +04:00
|
|
|
|
2019-11-07 10:58:00 +03:00
|
|
|
const struct rb_builtin_function *builtin_function_table;
|
|
|
|
|
2024-02-12 08:43:38 +03:00
|
|
|
st_table *ci_table;
|
2020-12-12 23:55:18 +03:00
|
|
|
struct rb_id_table *negative_cme_table;
|
2021-12-21 08:06:02 +03:00
|
|
|
st_table *overloaded_cme_table; // cme -> overloaded_cme
|
2024-04-17 13:46:48 +03:00
|
|
|
st_table *unused_block_warning_table;
|
2024-04-19 07:21:55 +03:00
|
|
|
bool unused_block_warning_strict;
|
2020-12-12 23:55:18 +03:00
|
|
|
|
2022-03-31 18:04:25 +03:00
|
|
|
// This id table contains a mapping from ID to ICs. It does this with ID
|
|
|
|
// keys and nested st_tables as values. The nested tables have ICs as keys
|
|
|
|
// and Qtrue as values. It is used when inline constant caches need to be
|
|
|
|
// invalidated or ISEQs are being freed.
|
|
|
|
struct rb_id_table *constant_cache;
|
|
|
|
|
2021-01-20 21:33:59 +03:00
|
|
|
#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
|
|
|
|
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
|
|
|
|
#endif
|
|
|
|
const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
|
|
|
|
|
2021-04-29 15:31:05 +03:00
|
|
|
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
|
2020-11-11 08:37:31 +03:00
|
|
|
uint32_t clock;
|
|
|
|
#endif
|
|
|
|
|
2012-12-20 02:29:18 +04:00
|
|
|
/* params */
|
|
|
|
struct { /* size in byte */
|
|
|
|
size_t thread_vm_stack_size;
|
|
|
|
size_t thread_machine_stack_size;
|
|
|
|
size_t fiber_vm_stack_size;
|
|
|
|
size_t fiber_machine_stack_size;
|
|
|
|
} default_params;
|
2014-07-18 05:53:18 +04:00
|
|
|
|
2008-10-22 00:59:23 +04:00
|
|
|
} rb_vm_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2012-12-20 02:29:18 +04:00
|
|
|
/* default values */
|
|
|
|
|
|
|
|
#define RUBY_VM_SIZE_ALIGN 4096
|
|
|
|
|
2012-12-25 11:29:36 +04:00
|
|
|
#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
|
2012-12-20 02:29:18 +04:00
|
|
|
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
|
|
|
|
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
|
|
|
|
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
|
|
|
|
|
|
|
#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
|
|
|
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
|
2017-12-05 04:10:15 +03:00
|
|
|
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
|
2022-10-19 13:49:45 +03:00
|
|
|
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
|
|
|
|
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
|
2017-12-05 04:10:13 +03:00
|
|
|
#else
|
2012-12-20 02:29:18 +04:00
|
|
|
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
2017-12-05 04:10:13 +03:00
|
|
|
#endif
|
2012-12-20 02:29:18 +04:00
|
|
|
|
2019-04-24 12:22:33 +03:00
|
|
|
#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
|
|
|
|
/* It seems sanitizers consume A LOT of machine stacks */
|
|
|
|
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
|
|
|
|
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
|
|
|
|
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
|
|
|
|
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
|
|
|
|
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
|
|
|
|
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
|
|
|
|
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
|
|
|
|
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
|
|
|
|
#endif
|
|
|
|
|
2012-09-28 08:05:36 +04:00
|
|
|
#ifndef VM_DEBUG_BP_CHECK
|
2013-01-10 17:03:39 +04:00
|
|
|
#define VM_DEBUG_BP_CHECK 0
|
2012-09-28 08:05:36 +04:00
|
|
|
#endif
|
|
|
|
|
2014-09-04 12:50:31 +04:00
|
|
|
#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
|
2019-08-17 17:14:52 +03:00
|
|
|
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
|
2014-09-04 12:50:31 +04:00
|
|
|
#endif
|
|
|
|
|
2016-07-28 14:02:30 +03:00
|
|
|
struct rb_captured_block {
|
|
|
|
VALUE self;
|
|
|
|
const VALUE *ep;
|
|
|
|
union {
|
|
|
|
const rb_iseq_t *iseq;
|
|
|
|
const struct vm_ifunc *ifunc;
|
|
|
|
VALUE val;
|
|
|
|
} code;
|
|
|
|
};
|
|
|
|
|
|
|
|
enum rb_block_handler_type {
|
|
|
|
block_handler_type_iseq,
|
|
|
|
block_handler_type_ifunc,
|
|
|
|
block_handler_type_symbol,
|
|
|
|
block_handler_type_proc
|
|
|
|
};
|
|
|
|
|
|
|
|
enum rb_block_type {
|
|
|
|
block_type_iseq,
|
|
|
|
block_type_ifunc,
|
|
|
|
block_type_symbol,
|
|
|
|
block_type_proc
|
|
|
|
};
|
|
|
|
|
|
|
|
struct rb_block {
|
|
|
|
union {
|
|
|
|
struct rb_captured_block captured;
|
|
|
|
VALUE symbol;
|
|
|
|
VALUE proc;
|
|
|
|
} as;
|
|
|
|
enum rb_block_type type;
|
|
|
|
};
|
|
|
|
|
* insns.def (send, invokesuper, invokeblock, opt_*), vm_core.h:
use only a `ci' (rb_call_info_t) parameter instead of using
parameters such as `op_id', 'op_argc', `blockiseq' and flag.
These information are stored in rb_call_info_t at the compile
time.
This technique simplifies parameter passings at related
function calls (~10% speedups for simple method invocation at
my machine).
`rb_call_info_t' also has new function pointer variable `call'.
This `call' variable enables to customize method (block)
invocation process for each place. However, it always call
`vm_call_general()' at this changes.
`rb_call_info_t' also has temporary variables for method
(block) invocation.
* vm_core.h, compile.c, insns.def: introduce VM_CALL_ARGS_SKIP_SETUP
VM_CALL macro. This flag indicates that this call can skip
caller_setup (block arg and splat arg).
* compile.c: catch up above changes.
* iseq.c: catch up above changes (especially for TS_CALLINFO).
* tool/instruction.rb: catch up above changes.
* vm_insnhelper.c, vm_insnhelper.h: ditto. Macros and functions
parameters are changed.
* vm_eval.c (vm_call0): ditto (it will be rewriten soon).
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@37180 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-10-14 20:59:05 +04:00
|
|
|
typedef struct rb_control_frame_struct {
|
2023-08-12 03:50:53 +03:00
|
|
|
const VALUE *pc; // cfp[0]
|
|
|
|
VALUE *sp; // cfp[1]
|
|
|
|
const rb_iseq_t *iseq; // cfp[2]
|
|
|
|
VALUE self; // cfp[3] / block[0]
|
|
|
|
const VALUE *ep; // cfp[4] / block[1]
|
|
|
|
const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
|
|
|
|
void *jit_return; // cfp[6] -- return address for JIT code
|
2012-09-28 08:05:36 +04:00
|
|
|
#if VM_DEBUG_BP_CHECK
|
2023-08-12 03:50:53 +03:00
|
|
|
VALUE *bp_check; // cfp[7]
|
2012-09-28 08:05:36 +04:00
|
|
|
#endif
|
* blockinlining.c, compile.c, compile.h, error.c, eval.c,
eval_intern.h, eval_jump.h, eval_load.c, eval_method.h,
eval_safe.h, gc.c, insnhelper.h, insns.def, iseq.c, proc.c,
process.c, signal.c, thread.c, thread_pthread.ci, thread_win32.ci,
vm.c, vm.h, vm_dump.c, vm_evalbody.ci, vm_macro.def,
yarv.h, yarvcore.h, yarvcore.c: change type and macro names:
* yarv_*_t -> rb_*_t
* yarv_*_struct -> rb_*_struct
* yarv_tag -> rb_vm_tag
* YARV_* -> RUBY_VM_*
* proc.c, vm.c: move functions about env object creation
from proc.c to vm.c.
* proc.c, yarvcore.c: fix rb_cVM initialization place.
* inits.c: change Init_ISeq() order (after Init_VM).
* ruby.h, proc.c: change declaration place of rb_cEnv
from proc.c to ruby.c.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11651 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-06 22:00:03 +03:00
|
|
|
} rb_control_frame_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2011-08-16 13:56:56 +04:00
|
|
|
extern const rb_data_type_t ruby_threadptr_data_type;
|
2011-02-04 19:05:40 +03:00
|
|
|
|
2017-06-28 07:49:30 +03:00
|
|
|
static inline struct rb_thread_struct *
|
|
|
|
rb_thread_ptr(VALUE thval)
|
|
|
|
{
|
2017-06-28 07:57:02 +03:00
|
|
|
return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
|
2017-06-28 07:49:30 +03:00
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
|
* blockinlining.c, compile.c, compile.h, error.c, eval.c,
eval_intern.h, eval_jump.h, eval_load.c, eval_method.h,
eval_safe.h, gc.c, insnhelper.h, insns.def, iseq.c, proc.c,
process.c, signal.c, thread.c, thread_pthread.ci, thread_win32.ci,
vm.c, vm.h, vm_dump.c, vm_evalbody.ci, vm_macro.def,
yarv.h, yarvcore.h, yarvcore.c: change type and macro names:
* yarv_*_t -> rb_*_t
* yarv_*_struct -> rb_*_struct
* yarv_tag -> rb_vm_tag
* YARV_* -> RUBY_VM_*
* proc.c, vm.c: move functions about env object creation
from proc.c to vm.c.
* proc.c, yarvcore.c: fix rb_cVM initialization place.
* inits.c: change Init_ISeq() order (after Init_VM).
* ruby.h, proc.c: change declaration place of rb_cEnv
from proc.c to ruby.c.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11651 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-06 22:00:03 +03:00
|
|
|
enum rb_thread_status {
|
2006-12-31 18:02:22 +03:00
|
|
|
THREAD_RUNNABLE,
|
|
|
|
THREAD_STOPPED,
|
2008-06-12 17:01:38 +04:00
|
|
|
THREAD_STOPPED_FOREVER,
|
2008-07-01 12:27:58 +04:00
|
|
|
THREAD_KILLED
|
2006-12-31 18:02:22 +03:00
|
|
|
};
|
|
|
|
|
2019-09-19 10:04:29 +03:00
|
|
|
#ifdef RUBY_JMP_BUF
|
2008-03-31 21:58:41 +04:00
|
|
|
typedef RUBY_JMP_BUF rb_jmpbuf_t;
|
2019-09-19 10:04:29 +03:00
|
|
|
#else
|
|
|
|
typedef void *rb_jmpbuf_t[5];
|
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2023-11-12 01:18:01 +03:00
|
|
|
/*
|
|
|
|
`rb_vm_tag_jmpbuf_t` type represents a buffer used to
|
|
|
|
long jump to a C frame associated with `rb_vm_tag`.
|
|
|
|
|
|
|
|
Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
|
|
|
|
following functions:
|
|
|
|
- `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
|
|
|
|
- `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
|
|
|
|
|
|
|
|
`RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
|
|
|
|
`rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
|
|
|
|
*/
|
|
|
|
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
|
|
|
|
/*
|
|
|
|
WebAssembly target with Asyncify-based SJLJ needs
|
|
|
|
to capture the execution context by unwind/rewind-ing
|
|
|
|
call frames into a jump buffer. The buffer space tends
|
|
|
|
to be considerably large unlike other architectures'
|
|
|
|
register-based buffers.
|
|
|
|
Therefore, we allocates the buffer on the heap on such
|
|
|
|
environments.
|
|
|
|
*/
|
|
|
|
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
|
|
|
|
|
|
|
|
#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
|
|
|
|
{
|
2023-11-22 18:23:04 +03:00
|
|
|
*jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
|
2023-11-12 01:18:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
|
|
|
|
{
|
2023-11-22 18:23:04 +03:00
|
|
|
ruby_xfree(*jmpbuf);
|
2023-11-12 01:18:01 +03:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
|
|
|
|
|
|
|
|
#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
|
|
|
|
{
|
2023-11-15 13:05:10 +03:00
|
|
|
// no-op
|
2023-11-12 01:18:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
|
|
|
|
{
|
2023-11-15 13:05:10 +03:00
|
|
|
// no-op
|
2023-11-12 01:18:01 +03:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-05-18 10:49:19 +04:00
|
|
|
/*
|
2017-10-26 14:02:13 +03:00
|
|
|
the members which are written in EC_PUSH_TAG() should be placed at
|
2013-05-18 10:49:19 +04:00
|
|
|
the beginning and the end, so that entire region is accessible.
|
|
|
|
*/
|
* blockinlining.c, compile.c, compile.h, error.c, eval.c,
eval_intern.h, eval_jump.h, eval_load.c, eval_method.h,
eval_safe.h, gc.c, insnhelper.h, insns.def, iseq.c, proc.c,
process.c, signal.c, thread.c, thread_pthread.ci, thread_win32.ci,
vm.c, vm.h, vm_dump.c, vm_evalbody.ci, vm_macro.def,
yarv.h, yarvcore.h, yarvcore.c: change type and macro names:
* yarv_*_t -> rb_*_t
* yarv_*_struct -> rb_*_struct
* yarv_tag -> rb_vm_tag
* YARV_* -> RUBY_VM_*
* proc.c, vm.c: move functions about env object creation
from proc.c to vm.c.
* proc.c, yarvcore.c: fix rb_cVM initialization place.
* inits.c: change Init_ISeq() order (after Init_VM).
* ruby.h, proc.c: change declaration place of rb_cEnv
from proc.c to ruby.c.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11651 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-06 22:00:03 +03:00
|
|
|
struct rb_vm_tag {
|
2006-12-31 18:02:22 +03:00
|
|
|
VALUE tag;
|
|
|
|
VALUE retval;
|
2023-11-12 01:18:01 +03:00
|
|
|
rb_vm_tag_jmpbuf_t buf;
|
* blockinlining.c, compile.c, compile.h, error.c, eval.c,
eval_intern.h, eval_jump.h, eval_load.c, eval_method.h,
eval_safe.h, gc.c, insnhelper.h, insns.def, iseq.c, proc.c,
process.c, signal.c, thread.c, thread_pthread.ci, thread_win32.ci,
vm.c, vm.h, vm_dump.c, vm_evalbody.ci, vm_macro.def,
yarv.h, yarvcore.h, yarvcore.c: change type and macro names:
* yarv_*_t -> rb_*_t
* yarv_*_struct -> rb_*_struct
* yarv_tag -> rb_vm_tag
* YARV_* -> RUBY_VM_*
* proc.c, vm.c: move functions about env object creation
from proc.c to vm.c.
* proc.c, yarvcore.c: fix rb_cVM initialization place.
* inits.c: change Init_ISeq() order (after Init_VM).
* ruby.h, proc.c: change declaration place of rb_cEnv
from proc.c to ruby.c.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11651 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-06 22:00:03 +03:00
|
|
|
struct rb_vm_tag *prev;
|
2017-06-23 12:43:52 +03:00
|
|
|
enum ruby_tag_type state;
|
2020-10-14 08:21:57 +03:00
|
|
|
unsigned int lock_rec;
|
2006-12-31 18:02:22 +03:00
|
|
|
};
|
|
|
|
|
2017-06-23 14:15:26 +03:00
|
|
|
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
|
|
|
|
STATIC_ASSERT(rb_vm_tag_buf_end,
|
2023-11-12 01:18:01 +03:00
|
|
|
offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
|
2017-06-23 14:15:26 +03:00
|
|
|
sizeof(struct rb_vm_tag));
|
|
|
|
|
2008-05-30 05:52:38 +04:00
|
|
|
struct rb_unblock_callback {
|
|
|
|
rb_unblock_function_t *func;
|
|
|
|
void *arg;
|
|
|
|
};
|
|
|
|
|
2008-07-27 19:20:01 +04:00
|
|
|
struct rb_mutex_struct;
|
|
|
|
|
2013-11-15 21:15:31 +04:00
|
|
|
typedef struct rb_ensure_entry {
|
|
|
|
VALUE marker;
|
2019-08-26 09:20:15 +03:00
|
|
|
VALUE (*e_proc)(VALUE);
|
2013-11-15 21:15:31 +04:00
|
|
|
VALUE data2;
|
|
|
|
} rb_ensure_entry_t;
|
|
|
|
|
|
|
|
typedef struct rb_ensure_list {
|
|
|
|
struct rb_ensure_list *next;
|
|
|
|
struct rb_ensure_entry entry;
|
|
|
|
} rb_ensure_list_t;
|
|
|
|
|
2014-10-16 02:35:08 +04:00
|
|
|
typedef struct rb_fiber_struct rb_fiber_t;
|
|
|
|
|
2020-09-21 00:54:08 +03:00
|
|
|
struct rb_waiting_list {
|
|
|
|
struct rb_waiting_list *next;
|
|
|
|
struct rb_thread_struct *thread;
|
|
|
|
struct rb_fiber_struct *fiber;
|
|
|
|
};
|
|
|
|
|
2020-03-19 07:25:53 +03:00
|
|
|
struct rb_execution_context_struct {
|
2017-05-09 08:06:41 +03:00
|
|
|
/* execution information */
|
2017-08-10 07:55:12 +03:00
|
|
|
VALUE *vm_stack; /* must free, must mark */
|
|
|
|
size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
|
2017-05-09 08:06:41 +03:00
|
|
|
rb_control_frame_t *cfp;
|
2017-06-26 10:56:44 +03:00
|
|
|
|
|
|
|
struct rb_vm_tag *tag;
|
2017-06-28 05:50:56 +03:00
|
|
|
|
2017-11-06 10:44:28 +03:00
|
|
|
/* interrupt flags */
|
|
|
|
rb_atomic_t interrupt_flag;
|
2018-05-15 13:11:32 +03:00
|
|
|
rb_atomic_t interrupt_mask; /* size should match flag */
|
2021-04-29 15:31:05 +03:00
|
|
|
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
|
2020-11-11 08:37:31 +03:00
|
|
|
uint32_t checked_clock;
|
|
|
|
#endif
|
2017-11-06 10:44:28 +03:00
|
|
|
|
2017-11-06 08:41:48 +03:00
|
|
|
rb_fiber_t *fiber_ptr;
|
|
|
|
struct rb_thread_struct *thread_ptr;
|
2017-06-28 17:27:49 +03:00
|
|
|
|
2017-06-28 05:50:56 +03:00
|
|
|
/* storage (ec (fiber) local) */
|
2020-01-04 03:45:58 +03:00
|
|
|
struct rb_id_table *local_storage;
|
2017-06-28 05:50:56 +03:00
|
|
|
VALUE local_storage_recursive_hash;
|
|
|
|
VALUE local_storage_recursive_hash_for_trace;
|
2017-06-28 09:09:06 +03:00
|
|
|
|
2022-12-01 13:00:33 +03:00
|
|
|
/* Inheritable fiber storage. */
|
|
|
|
VALUE storage;
|
|
|
|
|
2017-06-28 09:09:06 +03:00
|
|
|
/* eval env */
|
|
|
|
const VALUE *root_lep;
|
|
|
|
VALUE root_svar;
|
|
|
|
|
|
|
|
/* ensure & callcc */
|
|
|
|
rb_ensure_list_t *ensure_list;
|
2017-09-08 09:21:30 +03:00
|
|
|
|
2017-11-06 08:41:48 +03:00
|
|
|
/* trace information */
|
|
|
|
struct rb_trace_arg_struct *trace_arg;
|
2017-10-28 13:01:54 +03:00
|
|
|
|
2017-11-06 08:41:48 +03:00
|
|
|
/* temporary places */
|
|
|
|
VALUE errinfo;
|
|
|
|
VALUE passed_block_handler; /* for rb_iterate */
|
2018-10-19 23:56:10 +03:00
|
|
|
|
|
|
|
uint8_t raised_flag; /* only 3 bits needed */
|
|
|
|
|
|
|
|
/* n.b. only 7 bits needed, really: */
|
|
|
|
BITFIELD(enum method_missing_reason, method_missing_reason, 8);
|
|
|
|
|
2018-07-06 16:56:58 +03:00
|
|
|
VALUE private_const_reference;
|
2017-10-29 15:57:04 +03:00
|
|
|
|
2017-09-10 18:49:45 +03:00
|
|
|
/* for GC */
|
|
|
|
struct {
|
|
|
|
VALUE *stack_start;
|
|
|
|
VALUE *stack_end;
|
|
|
|
size_t stack_maxsize;
|
2018-05-09 12:53:19 +03:00
|
|
|
RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
|
2024-02-24 11:31:27 +03:00
|
|
|
|
|
|
|
#ifdef RUBY_ASAN_ENABLED
|
|
|
|
void *asan_fake_stack_handle;
|
|
|
|
#endif
|
2017-09-10 18:49:45 +03:00
|
|
|
} machine;
|
2020-03-19 07:25:53 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
/* Define the macro to its own name so later `#ifndef rb_execution_context_t`
 * checks see the typedef as already provided. */
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
/* Flag presumably consumed by builtin.h to detect that the EC type is
 * available from this header — TODO confirm against builtin.h. */
#define VM_CORE_H_EC_DEFINED 1
|
|
|
|
|
2019-06-19 09:47:15 +03:00
|
|
|
// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
|
|
|
|
|
2020-11-30 10:18:43 +03:00
|
|
|
/* Per-extension configuration flags. */
struct rb_ext_config {
    bool ractor_safe;  /* presumably set when the extension declared itself
                        * Ractor-safe (rb_ext_ractor_safe) — TODO confirm */
};

/* Forward declaration; the full rb_ractor_struct is defined elsewhere. */
typedef struct rb_ractor_struct rb_ractor_t;

/* Opaque handle for the underlying native (OS-level) thread. */
struct rb_native_thread;
|
2021-05-22 15:36:27 +03:00
|
|
|
|
2010-10-31 04:42:54 +03:00
|
|
|
/* Per-Ruby-thread state.  Execution state proper (VM stack, control frames,
 * tags, ...) lives in the rb_execution_context_t reachable via `ec`. */
typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor
    VALUE self;                    /* the Thread object wrapping this struct */
    rb_ractor_t *ractor;           /* the ractor this thread belongs to */
    rb_vm_t *vm;                   /* the owning VM */
    struct rb_native_thread *nt;   /* backing native thread */
    rb_execution_context_t *ec;    /* current execution context */

    struct rb_thread_sched_item sched;
    bool mn_schedulable;           /* NOTE(review): looks M:N-scheduler related — confirm */
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;  /* non-NULL presumably while inside a blocking region — confirm */

    VALUE thgroup;                 /* ThreadGroup membership */
    VALUE value;                   /* NOTE(review): likely the thread's result value — confirm */

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;

    struct rb_waiting_list *join_list; /* threads waiting in Thread#join on this thread */

    /* Arguments for the thread body; which member is valid is selected by
     * `invoke_type` below. */
    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;               /* fiber scheduler object (Fiber.set_scheduler) — confirm */
    unsigned int blocking;

    /* misc */
    VALUE name;                    /* Thread#name */
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2022-05-23 21:57:06 +03:00
|
|
|
static inline unsigned int
|
|
|
|
rb_th_serial(const rb_thread_t *th)
|
|
|
|
{
|
2023-04-04 09:42:37 +03:00
|
|
|
return th ? (unsigned int)th->serial : 0;
|
2022-05-23 21:57:06 +03:00
|
|
|
}
|
|
|
|
|
2012-12-20 12:13:53 +04:00
|
|
|
/* Operand encoding for the `defineclass` instruction: the low 3 bits carry
 * the kind of definition, higher bits carry flags. */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;

/* Extract the definition kind from a packed operand. */
#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
|
|
|
|
|
* blockinlining.c: remove "yarv" prefix.
* array.c, numeric.c: ditto.
* insnhelper.ci, insns.def, vm_evalbody.ci: ditto.
* yarvcore.c: removed.
* yarvcore.h: renamed to core.h.
* cont.c, debug.c, error.c, process.c, signal.c : ditto.
* ext/probeprofiler/probeprofiler.c: ditto.
* id.c, id.h: added.
* inits.c: ditto.
* compile.c: rename internal functions.
* compile.h: fix debug flag.
* eval.c, object.c, vm.c: remove ruby_top_self.
use rb_vm_top_self() instead.
* eval_intern.h, eval_load: ditto.
* gc.c: rename yarv_machine_stack_mark() to
rb_gc_mark_machine_stack().
* insnhelper.h: remove unused macros.
* iseq.c: add iseq_compile() to create iseq object
from source string.
* proc.c: rename a internal function.
* template/insns.inc.tmpl: remove YARV prefix.
* thread.c:
* vm.c (rb_iseq_eval): added.
* vm.c: move some functions from yarvcore.c.
* vm_dump.c: fix to remove compiler warning.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@12741 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-07-12 08:25:46 +04:00
|
|
|
/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
/* Constructors compiling an AST (ast_value) into an instruction sequence.
 * The variants differ in frame kind (top/main/eval) and accepted options. */
rb_iseq_t *rb_iseq_new         (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top     (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
/* Most general constructor: explicit type, compile options and script lines. */
rb_iseq_t *rb_iseq_new_with_opt(      VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*,
                                VALUE script_lines);
|
2020-10-23 07:27:21 +03:00
|
|
|
|
2019-08-26 08:25:53 +03:00
|
|
|
struct iseq_link_anchor;
/* Callback memo for rb_iseq_new_with_callback(); laid out like an imemo
 * (leading flags/reserved words) so it can be GC-managed. */
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
|
|
|
|
/* Allocate a GC-managed (imemo_ifunc) callback memo holding `func` and its
 * opaque argument `ptr`, for use with rb_iseq_new_with_callback(). */
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    struct rb_iseq_new_with_callback_callback_func *memo =
        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
    memo->func = func;
    memo->data = ptr;

    return memo;
}
|
|
|
|
/* Build an iseq whose body is produced by `ifunc` (see the memo constructor
 * above) instead of being compiled from an AST. */
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
|
* iseq.c, vm_eval.c: set th->base_block properly.
th->base_block is information for (a) parsing, (b) compiling
and (c) setting up the frame to execute the program passed by
`eval' method. For example, (1) parser need to know up-level
variables to detect it is variable or method without paren.
Befor (a), (b) and (c), VM set th->base_block by passed bindng
(or previous frame information). After execute (a), (b) and (c),
VM should clear th->base_block. However, if (a), (b) or (c)
raises an exception, then th->base_block is not cleared.
Problem is that the uncleared value th->balo_block is used for
irrelevant iseq compilation. It causes SEGV or critical error.
I tried to solve this problem: to clear them before exception,
but finally I found out that it is difficult to do it (Ruby
program can be run in many places).
Because of this background, I set th->base_block before
compiling iseq and restore it after compiling.
Basically, th->base_block is dirty hack (similar to global
variable) and this patch is also dirty.
* bootstraptest/test_eval.rb: add a test for above.
* internal.h: remove unused decl.
* iseq.c (rb_iseq_compile_with_option): add base_block parameter.
set th->base_block before compation and restore it after
compilation.
* ruby.c (require_libraries): pass 0 as base_block instead of
setting th->base_block
* tool/compile_prelude.rb (prelude_eval): apply above changes.
* vm.c, vm_eval.c: ditto.
* vm_core.h: add comments.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@36179 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-06-22 13:32:56 +04:00
|
|
|
|
2015-07-22 01:52:59 +03:00
|
|
|
/* Disassemble a whole iseq into a String. */
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
/* Disassemble a single instruction at `pos`, appending to `str`. */
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

/* Coverage data collected for this iseq. */
VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
/* Extract the rb_proc_t* from a Proc VALUE. */
#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

/* Internal representation of a Proc object. */
typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1; /* bool */
    unsigned int is_isolated: 1; /* bool */
} rb_proc_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2021-01-06 09:30:56 +03:00
|
|
|
RUBY_SYMBOL_EXPORT_BEGIN
/* Ractor-related Proc operations (isolation from outer environment). */
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END
|
2020-03-09 20:22:11 +03:00
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
/* Heap-escaped environment (imemo_env): captured local variables plus the
 * ep that points into `env`. */
typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2013-07-22 11:32:52 +04:00
|
|
|
extern const rb_data_type_t ruby_binding_data_type;

/* Extract the rb_binding_t* from a Binding VALUE. */
#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

/* Internal representation of a Binding object. */
typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
/* used by compile time and send insn */

/* Kind of ===-style match performed by the checkmatch instruction. */
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
/* Flag bit: the pattern operand is an array; match each element. */
#define VM_CHECKMATCH_ARRAY 0x04
|
|
|
|
|
2010-10-31 04:42:54 +03:00
|
|
|
/* Operand of the putspecialobject instruction. */
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};
|
2008-07-01 07:05:58 +04:00
|
|
|
|
2015-02-27 11:10:04 +03:00
|
|
|
/* Indices into the special-variable (svar) storage of a frame. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1, /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
|
|
|
|
|
2009-07-13 13:30:23 +04:00
|
|
|
/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;     /* instance variable cache */
typedef struct iseq_inline_cvar_cache_entry *ICVARC; /* class variable cache */
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

/* Hash used by opt_case_dispatch (case/when jump table). */
typedef VALUE CDHASH;
|
|
|
|
|
2007-07-01 22:16:02 +04:00
|
|
|
/* Calling-convention attribute for instruction functions; identity unless a
 * platform override is in effect. */
#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

/* Signature of a single VM instruction implementation. */
typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
|
2016-07-28 14:02:30 +03:00
|
|
|
/* Tag/untag low bits of a pointer stored as a VALUE.
 * NOTE: `mask` is now parenthesized — previously `~mask` would misparse for
 * compound arguments such as `0x02|0x01` (`~0x02|0x01` != `~(0x02|0x01)`). */
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~(mask)))

/* GC-guarded pointers: bit 0 marks the word as a tagged pointer (it looks
 * like a Fixnum to the GC); REF clears the low two bits when dereferencing. */
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
|
2006-12-31 18:02:22 +03:00
|
|
|
|
Rust YJIT
In December 2021, we opened an [issue] to solicit feedback regarding the
porting of the YJIT codebase from C99 to Rust. There were some
reservations, but this project was given the go ahead by Ruby core
developers and Matz. Since then, we have successfully completed the port
of YJIT to Rust.
The new Rust version of YJIT has reached parity with the C version, in
that it passes all the CRuby tests, is able to run all of the YJIT
benchmarks, and performs similarly to the C version (because it works
the same way and largely generates the same machine code). We've even
incorporated some design improvements, such as a more fine-grained
constant invalidation mechanism which we expect will make a big
difference in Ruby on Rails applications.
Because we want to be careful, YJIT is guarded behind a configure
option:
```shell
./configure --enable-yjit # Build YJIT in release mode
./configure --enable-yjit=dev # Build YJIT in dev/debug mode
```
By default, YJIT does not get compiled and cargo/rustc is not required.
If YJIT is built in dev mode, then `cargo` is used to fetch development
dependencies, but when building in release, `cargo` is not required,
only `rustc`. At the moment YJIT requires Rust 1.60.0 or newer.
The YJIT command-line options remain mostly unchanged, and more details
about the build process are documented in `doc/yjit/yjit.md`.
The CI tests have been updated and do not take any more resources than
before.
The development history of the Rust port is available at the following
commit for interested parties:
https://github.com/Shopify/ruby/commit/1fd9573d8b4b65219f1c2407f30a0a60e537f8be
Our hope is that Rust YJIT will be compiled and included as a part of
system packages and compiled binaries of the Ruby 3.2 release. We do not
anticipate any major problems as Rust is well supported on every
platform which YJIT supports, but to make sure that this process works
smoothly, we would like to reach out to those who take care of building
systems packages before the 3.2 release is shipped and resolve any
issues that may come up.
[issue]: https://bugs.ruby-lang.org/issues/18481
Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
Co-authored-by: Noah Gibbs <the.codefolio.guy@gmail.com>
Co-authored-by: Kevin Newton <kddnewton@gmail.com>
2022-04-19 21:40:21 +03:00
|
|
|
enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
     *
     * X   : tag for GC marking (It seems as Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 7 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    /* selects only the magic bits of the flags word */
    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH               = 0x0020,
    VM_FRAME_FLAG_BMETHOD              = 0x0040,
    VM_FRAME_FLAG_CFRAME               = 0x0080,
    VM_FRAME_FLAG_LAMBDA               = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW            = 0x0400,
    VM_FRAME_FLAG_PASSED               = 0x0800,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};
|
|
|
|
|
|
|
|
/* Number of bookkeeping slots stored at the top of every environment
 * (ME/CREF, SPECVAL, FLAGS); indexes below are relative to ep. */
#define VM_ENV_DATA_SIZE             ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF    (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL    (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS      ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV        ( 1) /* ep[ 1] */

/* Index (from ep) of the last local variable slot. */
#define VM_ENV_INDEX_LAST_LVAR       (-VM_ENV_DATA_SIZE)
|
|
|
|
|
2016-08-03 03:28:12 +03:00
|
|
|
/* Forward declaration; defined later in this file. */
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
|
|
|
|
|
2016-07-28 14:02:30 +03:00
|
|
|
static inline void
|
|
|
|
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
|
|
|
|
{
|
|
|
|
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
|
|
|
|
VM_ASSERT(FIXNUM_P(flags));
|
|
|
|
VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
|
|
|
|
{
|
|
|
|
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
|
|
|
|
VM_ASSERT(FIXNUM_P(flags));
|
|
|
|
VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
|
|
|
|
}
|
|
|
|
|
2016-08-02 04:47:21 +03:00
|
|
|
static inline unsigned long
|
2016-07-28 14:02:30 +03:00
|
|
|
VM_ENV_FLAGS(const VALUE *ep, long flag)
|
|
|
|
{
|
|
|
|
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
|
|
|
|
VM_ASSERT(FIXNUM_P(flags));
|
|
|
|
return flags & flag;
|
|
|
|
}
|
2012-06-11 07:14:59 +04:00
|
|
|
|
2016-08-02 04:47:21 +03:00
|
|
|
static inline unsigned long
|
2016-07-28 14:02:30 +03:00
|
|
|
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
|
|
|
|
}
|
|
|
|
|
2017-06-03 13:07:44 +03:00
|
|
|
static inline int
|
|
|
|
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
|
|
|
|
}
|
|
|
|
|
2019-09-03 21:32:02 +03:00
|
|
|
static inline int
|
|
|
|
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
|
|
|
|
}
|
|
|
|
|
2016-08-03 03:28:12 +03:00
|
|
|
static inline int
|
|
|
|
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
|
|
|
|
{
|
2017-06-03 13:07:44 +03:00
|
|
|
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
|
2016-08-03 03:28:12 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
|
|
|
|
}
|
|
|
|
|
2016-08-03 04:50:50 +03:00
|
|
|
static inline int
|
|
|
|
rb_obj_is_iseq(VALUE iseq)
|
|
|
|
{
|
2017-04-07 09:41:32 +03:00
|
|
|
return imemo_type_p(iseq, imemo_iseq);
|
2016-08-03 04:50:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
|
2022-10-17 11:50:42 +03:00
|
|
|
VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
|
|
|
|
(VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
|
2016-08-03 04:50:50 +03:00
|
|
|
return cframe_p;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
return !VM_FRAME_CFRAME_P(cfp);
|
|
|
|
}
|
|
|
|
|
2016-07-28 14:02:30 +03:00
|
|
|
/* True iff `cfp` is a cfunc frame. */
#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

/* Tag a previous-ep pointer for storage in ep[VM_ENV_DATA_INDEX_SPECVAL]. */
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
/* Sentinel meaning "no block was given". */
#define VM_BLOCK_HANDLER_NONE 0
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
VM_ENV_LOCAL_P(const VALUE *ep)
|
|
|
|
{
|
2016-07-29 04:51:09 +03:00
|
|
|
return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
|
2016-07-28 14:02:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline const VALUE *
|
|
|
|
VM_ENV_PREV_EP(const VALUE *ep)
|
|
|
|
{
|
|
|
|
VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
|
|
|
|
return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
|
|
|
|
{
|
|
|
|
VM_ASSERT(VM_ENV_LOCAL_P(ep));
|
|
|
|
return ep[VM_ENV_DATA_INDEX_SPECVAL];
|
|
|
|
}
|
|
|
|
|
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
int rb_vm_ep_in_heap_p(const VALUE *ep);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
VM_ENV_ESCAPED_P(const VALUE *ep)
|
|
|
|
{
|
|
|
|
VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
|
|
|
|
return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
|
|
|
|
}
|
|
|
|
|
2016-07-28 22:13:26 +03:00
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
static inline int
|
|
|
|
vm_assert_env(VALUE obj)
|
|
|
|
{
|
2017-04-07 09:41:32 +03:00
|
|
|
VM_ASSERT(imemo_type_p(obj, imemo_env));
|
2016-07-28 22:13:26 +03:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-10-25 12:32:44 +03:00
|
|
|
RBIMPL_ATTR_NONNULL((1))
|
2016-07-28 14:02:30 +03:00
|
|
|
static inline VALUE
|
|
|
|
VM_ENV_ENVVAL(const VALUE *ep)
|
|
|
|
{
|
2016-07-28 22:13:26 +03:00
|
|
|
VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
|
2016-07-28 14:02:30 +03:00
|
|
|
VM_ASSERT(VM_ENV_ESCAPED_P(ep));
|
2016-07-28 22:13:26 +03:00
|
|
|
VM_ASSERT(vm_assert_env(envval));
|
|
|
|
return envval;
|
|
|
|
}
|
|
|
|
|
2017-10-25 12:32:44 +03:00
|
|
|
RBIMPL_ATTR_NONNULL((1))
|
2016-07-28 22:13:26 +03:00
|
|
|
static inline const rb_env_t *
|
|
|
|
VM_ENV_ENVVAL_PTR(const VALUE *ep)
|
|
|
|
{
|
|
|
|
return (const rb_env_t *)VM_ENV_ENVVAL(ep);
|
2016-07-28 14:02:30 +03:00
|
|
|
}
|
|
|
|
|
2016-07-28 22:13:26 +03:00
|
|
|
static inline const rb_env_t *
|
|
|
|
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
|
|
|
|
{
|
2024-02-20 23:58:10 +03:00
|
|
|
rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
|
|
|
|
env->ep = env_ep;
|
|
|
|
env->env = env_body;
|
2016-07-28 22:13:26 +03:00
|
|
|
env->env_size = env_size;
|
|
|
|
env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
|
|
|
|
return env;
|
|
|
|
}
|
|
|
|
|
2016-07-28 14:02:30 +03:00
|
|
|
static inline void
|
|
|
|
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
|
|
|
|
{
|
|
|
|
*((VALUE *)ptr) = v;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
|
|
|
|
{
|
|
|
|
VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
|
|
|
|
VM_FORCE_WRITE(ptr, special_const_value);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
|
|
|
|
{
|
|
|
|
VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
|
|
|
|
VM_FORCE_WRITE(&ep[index], v);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Walk from `ep` to the method-local (outermost) environment pointer. */
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
|
2012-06-04 11:24:44 +04:00
|
|
|
|
2011-01-21 18:54:58 +03:00
|
|
|
/* Control frames grow downward: the previous (older) frame is at a higher address. */
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
|
2017-10-26 11:41:34 +03:00
|
|
|
|
* blockinlining.c, compile.c, compile.h, error.c, eval.c,
eval_intern.h, eval_jump.h, eval_load.c, eval_method.h,
eval_safe.h, gc.c, insnhelper.h, insns.def, iseq.c, proc.c,
process.c, signal.c, thread.c, thread_pthread.ci, thread_win32.ci,
vm.c, vm.h, vm_dump.c, vm_evalbody.ci, vm_macro.def,
yarv.h, yarvcore.h, yarvcore.c: change type and macro names:
* yarv_*_t -> rb_*_t
* yarv_*_struct -> rb_*_struct
* yarv_tag -> rb_vm_tag
* YARV_* -> RUBY_VM_*
* proc.c, vm.c: move functions about env object creation
from proc.c to vm.c.
* proc.c, yarvcore.c: fix rb_cVM initialization place.
* inits.c: change Init_ISeq() order (after Init_VM).
* ruby.h, proc.c: change declaration place of rb_cEnv
from proc.c to ruby.c.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11651 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-06 22:00:03 +03:00
|
|
|
/* `cfp` is valid while it stays strictly below the end-of-stack frame `ecfp`. */
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))
|
2017-10-26 11:41:34 +03:00
|
|
|
|
|
|
|
static inline const rb_control_frame_t *
|
|
|
|
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
|
|
|
|
{
|
|
|
|
return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
|
|
|
|
{
|
|
|
|
return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
|
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2016-07-28 14:02:30 +03:00
|
|
|
static inline int
|
|
|
|
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
|
|
|
|
{
|
|
|
|
if ((block_handler & 0x03) == 0x01) {
|
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
|
2017-04-07 09:41:32 +03:00
|
|
|
VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
|
2016-07-28 14:02:30 +03:00
|
|
|
#endif
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
|
|
|
|
{
|
|
|
|
VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
|
|
|
|
VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
|
|
|
|
return block_handler;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const struct rb_captured_block *
|
|
|
|
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
|
|
|
|
{
|
|
|
|
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
|
|
|
|
VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
|
|
|
|
return captured;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
VM_BH_IFUNC_P(VALUE block_handler)
|
|
|
|
{
|
|
|
|
if ((block_handler & 0x03) == 0x03) {
|
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
|
2017-04-07 09:41:32 +03:00
|
|
|
VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
|
2016-07-28 14:02:30 +03:00
|
|
|
#endif
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
|
|
|
|
{
|
|
|
|
VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
|
|
|
|
VM_ASSERT(VM_BH_IFUNC_P(block_handler));
|
|
|
|
return block_handler;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const struct rb_captured_block *
|
|
|
|
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
|
|
|
|
{
|
|
|
|
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
|
|
|
|
VM_ASSERT(VM_BH_IFUNC_P(block_handler));
|
|
|
|
return captured;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const struct rb_captured_block *
|
|
|
|
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
|
|
|
|
{
|
|
|
|
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
|
|
|
|
VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
|
|
|
|
return captured;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline enum rb_block_handler_type
|
|
|
|
vm_block_handler_type(VALUE block_handler)
|
|
|
|
{
|
|
|
|
if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
|
|
|
|
return block_handler_type_iseq;
|
|
|
|
}
|
|
|
|
else if (VM_BH_IFUNC_P(block_handler)) {
|
|
|
|
return block_handler_type_ifunc;
|
|
|
|
}
|
|
|
|
else if (SYMBOL_P(block_handler)) {
|
|
|
|
return block_handler_type_symbol;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
VM_ASSERT(rb_obj_is_proc(block_handler));
|
|
|
|
return block_handler_type_proc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-08 08:22:49 +03:00
|
|
|
static inline void
|
2017-09-11 11:50:07 +03:00
|
|
|
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
|
2016-07-28 14:02:30 +03:00
|
|
|
{
|
|
|
|
VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
|
2017-02-07 07:24:44 +03:00
|
|
|
(vm_block_handler_type(block_handler), 1));
|
2016-07-28 14:02:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline enum rb_block_type
|
|
|
|
vm_block_type(const struct rb_block *block)
|
|
|
|
{
|
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
switch (block->type) {
|
|
|
|
case block_type_iseq:
|
2017-04-07 09:41:32 +03:00
|
|
|
VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
|
2016-07-28 14:02:30 +03:00
|
|
|
break;
|
|
|
|
case block_type_ifunc:
|
2017-04-07 09:41:32 +03:00
|
|
|
VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
|
2016-07-28 14:02:30 +03:00
|
|
|
break;
|
|
|
|
case block_type_symbol:
|
|
|
|
VM_ASSERT(SYMBOL_P(block->as.symbol));
|
|
|
|
break;
|
|
|
|
case block_type_proc:
|
|
|
|
VM_ASSERT(rb_obj_is_proc(block->as.proc));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return block->type;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
|
|
|
|
{
|
|
|
|
struct rb_block *mb = (struct rb_block *)block;
|
|
|
|
mb->type = type;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const struct rb_block *
|
|
|
|
vm_proc_block(VALUE procval)
|
|
|
|
{
|
|
|
|
VM_ASSERT(rb_obj_is_proc(procval));
|
2017-03-14 07:03:48 +03:00
|
|
|
return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
|
2016-07-28 14:02:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
|
|
|
|
static inline const VALUE *vm_block_ep(const struct rb_block *block);
|
|
|
|
|
|
|
|
static inline const rb_iseq_t *
|
|
|
|
vm_proc_iseq(VALUE procval)
|
|
|
|
{
|
|
|
|
return vm_block_iseq(vm_proc_block(procval));
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const VALUE *
|
|
|
|
vm_proc_ep(VALUE procval)
|
|
|
|
{
|
|
|
|
return vm_block_ep(vm_proc_block(procval));
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const rb_iseq_t *
|
|
|
|
vm_block_iseq(const struct rb_block *block)
|
|
|
|
{
|
|
|
|
switch (vm_block_type(block)) {
|
2017-02-16 12:15:26 +03:00
|
|
|
case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
|
2016-07-28 14:02:30 +03:00
|
|
|
case block_type_proc: return vm_proc_iseq(block->as.proc);
|
|
|
|
case block_type_ifunc:
|
|
|
|
case block_type_symbol: return NULL;
|
|
|
|
}
|
|
|
|
VM_UNREACHABLE(vm_block_iseq);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline const VALUE *
|
|
|
|
vm_block_ep(const struct rb_block *block)
|
|
|
|
{
|
|
|
|
switch (vm_block_type(block)) {
|
2016-07-28 14:02:32 +03:00
|
|
|
case block_type_iseq:
|
2016-07-28 14:02:30 +03:00
|
|
|
case block_type_ifunc: return block->as.captured.ep;
|
|
|
|
case block_type_proc: return vm_proc_ep(block->as.proc);
|
|
|
|
case block_type_symbol: return NULL;
|
|
|
|
}
|
|
|
|
VM_UNREACHABLE(vm_block_ep);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
vm_block_self(const struct rb_block *block)
|
|
|
|
{
|
|
|
|
switch (vm_block_type(block)) {
|
|
|
|
case block_type_iseq:
|
|
|
|
case block_type_ifunc:
|
|
|
|
return block->as.captured.self;
|
|
|
|
case block_type_proc:
|
|
|
|
return vm_block_self(vm_proc_block(block->as.proc));
|
|
|
|
case block_type_symbol:
|
|
|
|
return Qundef;
|
|
|
|
}
|
|
|
|
VM_UNREACHABLE(vm_block_self);
|
|
|
|
return Qundef;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_BH_TO_SYMBOL(VALUE block_handler)
|
|
|
|
{
|
|
|
|
VM_ASSERT(SYMBOL_P(block_handler));
|
|
|
|
return block_handler;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_BH_FROM_SYMBOL(VALUE symbol)
|
|
|
|
{
|
|
|
|
VM_ASSERT(SYMBOL_P(symbol));
|
|
|
|
return symbol;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_BH_TO_PROC(VALUE block_handler)
|
|
|
|
{
|
|
|
|
VM_ASSERT(rb_obj_is_proc(block_handler));
|
|
|
|
return block_handler;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
VM_BH_FROM_PROC(VALUE procval)
|
|
|
|
{
|
|
|
|
VM_ASSERT(rb_obj_is_proc(procval));
|
|
|
|
return procval;
|
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);
|
2006-12-31 18:02:22 +03:00
|
|
|
|
|
|
|
/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

/* Dump the current (or given) control frame stack to stderr. */
#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)

bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
|
|
|
|
/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
|
2008-05-22 20:19:14 +04:00
|
|
|
|
2019-09-03 19:32:42 +03:00
|
|
|
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
|
2017-11-16 10:25:30 +03:00
|
|
|
|
2017-10-26 11:41:34 +03:00
|
|
|
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
|
2017-11-16 10:25:30 +03:00
|
|
|
static inline VALUE
|
|
|
|
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
|
|
|
|
{
|
|
|
|
return rb_vm_make_proc_lambda(ec, captured, klass, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
|
|
|
|
{
|
|
|
|
return rb_vm_make_proc_lambda(ec, captured, klass, 1);
|
|
|
|
}
|
|
|
|
|
2017-11-07 11:01:26 +03:00
|
|
|
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
|
2015-07-14 20:36:36 +03:00
|
|
|
VALUE rb_vm_env_local_variables(const rb_env_t *env);
|
2016-07-28 22:13:26 +03:00
|
|
|
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
|
2017-06-01 18:12:14 +03:00
|
|
|
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
|
2010-12-04 05:08:05 +03:00
|
|
|
void rb_vm_inc_const_missing_count(void);
|
2019-09-06 05:25:34 +03:00
|
|
|
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
|
|
|
|
const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
|
2023-01-16 23:42:51 +03:00
|
|
|
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
|
2023-03-07 09:02:03 +03:00
|
|
|
void rb_vm_pop_frame(rb_execution_context_t *ec);
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
|
2010-10-06 04:08:44 +04:00
|
|
|
/* Timer-thread lifecycle control. */
void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);
|
2011-06-27 04:30:41 +04:00
|
|
|
|
vm*: doubly-linked list from ccan to manage vm->living_threads
A doubly-linked list for tracking living threads guarantees
constant-time insert/delete performance with no corner cases of a
hash table. I chose this ccan implementation of doubly-linked
lists over the BSD sys/queue.h implementation since:
1) insertion and removal are both branchless
2) locality is improved if a struct may be a member of multiple lists
(0002 patch in Feature 9632 will introduce a secondary list
for waiting FDs)
This also increases cache locality during iteration: improving
performance in a new IO#close benchmark with many sleeping threads
while still scanning the same number of threads.
vm_thread_close 1.762
* vm_core.h (rb_vm_t): list_head and counter for living_threads
(rb_thread_t): vmlt_node for living_threads linkage
(rb_vm_living_threads_init): new function wrapper
(rb_vm_living_threads_insert): ditto
(rb_vm_living_threads_remove): ditto
* vm.c (rb_vm_living_threads_foreach): new function wrapper
* thread.c (terminate_i, thread_start_func_2, thread_create_core,
thread_fd_close_i, thread_fd_close): update to use new APIs
* vm.c (vm_mark_each_thread_func, rb_vm_mark, ruby_vm_destruct,
vm_memsize, vm_init2, Init_VM): ditto
* vm_trace.c (clear_trace_func_i, rb_clear_trace_func): ditto
* benchmark/bm_vm_thread_close.rb: added to show improvement
* ccan/build_assert/build_assert.h: added as a dependency of list.h
* ccan/check_type/check_type.h: ditto
* ccan/container_of/container_of.h: ditto
* ccan/licenses/BSD-MIT: ditto
* ccan/licenses/CC0: ditto
* ccan/str/str.h: ditto (stripped of unused macros)
* ccan/list/list.h: ditto
* common.mk: add CCAN_LIST_INCLUDES
[ruby-core:61871][Feature 9632 (part 1)]
Apologies for the size of this commit, but I think a good
doubly-linked list will be useful for future features, too.
This may be used to add ordering to a container_of-based hash
table to preserve compatibility if required (e.g. feature 9614).
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@45913 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2014-05-11 03:48:51 +04:00
|
|
|
static inline void
|
|
|
|
rb_vm_living_threads_init(rb_vm_t *vm)
|
|
|
|
{
|
2022-03-30 10:36:31 +03:00
|
|
|
ccan_list_head_init(&vm->waiting_fds);
|
|
|
|
ccan_list_head_init(&vm->workqueue);
|
|
|
|
ccan_list_head_init(&vm->ractor.set);
|
2023-04-10 04:53:13 +03:00
|
|
|
ccan_list_head_init(&vm->ractor.sched.zombie_threads);
|
vm*: doubly-linked list from ccan to manage vm->living_threads
A doubly-linked list for tracking living threads guarantees
constant-time insert/delete performance with no corner cases of a
hash table. I chose this ccan implementation of doubly-linked
lists over the BSD sys/queue.h implementation since:
1) insertion and removal are both branchless
2) locality is improved if a struct may be a member of multiple lists
(0002 patch in Feature 9632 will introduce a secondary list
for waiting FDs)
This also increases cache locality during iteration: improving
performance in a new IO#close benchmark with many sleeping threads
while still scanning the same number of threads.
vm_thread_close 1.762
* vm_core.h (rb_vm_t): list_head and counter for living_threads
(rb_thread_t): vmlt_node for living_threads linkage
(rb_vm_living_threads_init): new function wrapper
(rb_vm_living_threads_insert): ditto
(rb_vm_living_threads_remove): ditto
* vm.c (rb_vm_living_threads_foreach): new function wrapper
* thread.c (terminate_i, thread_start_func_2, thread_create_core,
thread_fd_close_i, thread_fd_close): update to use new APIs
* vm.c (vm_mark_each_thread_func, rb_vm_mark, ruby_vm_destruct,
vm_memsize, vm_init2, Init_VM): ditto
* vm_trace.c (clear_trace_func_i, rb_clear_trace_func): ditto
* benchmark/bm_vm_thread_close.rb: added to show improvement
* ccan/build_assert/build_assert.h: added as a dependency of list.h
* ccan/check_type/check_type.h: ditto
* ccan/container_of/container_of.h: ditto
* ccan/licenses/BSD-MIT: ditto
* ccan/licenses/CC0: ditto
* ccan/str/str.h: ditto (stripped of unused macros)
* ccan/list/list.h: ditto
* common.mk: add CCAN_LIST_INCLUDES
[ruby-core:61871][Feature 9632 (part 1)]
Apologies for the size of this commit, but I think a good
doubly-linked list will be useful for future features, too.
This may be used to add ordering to a container_of-based hash
table to preserve compatibility if required (e.g. feature 9614).
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@45913 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2014-05-11 03:48:51 +04:00
|
|
|
}
|
|
|
|
|
2009-08-16 05:38:32 +04:00
|
|
|
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
/* Move every environment on ec's VM stack to the heap. */
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
|
Pass down "stack start" variables from closer to the top of the stack
This commit changes how stack extents are calculated for both the main
thread and other threads. Ruby uses the address of a local variable as
part of the calculation for machine stack extents:
* pthreads uses it as a lower-bound on the start of the stack, because
glibc (and maybe other libcs) can store its own data on the stack
before calling into user code on thread creation.
* win32 uses it as an argument to VirtualQuery, which gets the extent of
the memory mapping which contains the variable
However, the local being used for this is actually too low (too close to
the leaf function call) in both the main thread case and the new thread
case.
In the main thread case, we have the `INIT_STACK` macro, which is used
for pthreads to set the `native_main_thread->stack_start` value. This
value is correctly captured at the very top level of the program (in
main.c). However, this is _not_ what's used to set the execution context
machine stack (`th->ec->machine_stack.stack_start`); that gets set as
part of a call to `ruby_thread_init_stack` in `Init_BareVM`, using the
address of a local variable allocated _inside_ `Init_BareVM`. This is
too low; we need to use a local allocated closer to the top of the
program.
In the new thread case, the local is allocated inside
`native_thread_init_stack`, which is, again, too low.
In both cases, this means that we might have VALUEs lying outside the
bounds of `th->ec->machine.stack_{start,end}`, which won't be marked
correctly by the GC machinery.
To fix this,
* In the main thread case: We already have `INIT_STACK` at the right
level, so just pass that local var to `ruby_thread_init_stack`.
* In the new thread case: Allocate the local one level above the call to
`native_thread_init_stack` in `call_thread_start_func2`.
[Bug #20001]
fix
2023-11-12 05:24:55 +03:00
|
|
|
void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
|
2021-06-26 01:17:26 +03:00
|
|
|
rb_thread_t * ruby_thread_from_native(void);
|
|
|
|
int ruby_thread_set_native(rb_thread_t *th);
|
* vm_trace.c (tracepoint_attr_callee_id, rb_tracearg_callee_id):
add TracePoint#callee_id. [ruby-core:77241] [Feature #12747]
* cont.c, eval.c, gc.c, include/ruby/intern.h, insns.def, thread.c,
vm.c, vm_backtrace.c, vm_core.h, vm_eval.c, vm_insnhelper.c, vm_trace.c: ditto.
* test/ruby/test_settracefunc.rb: tests for above.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@56593 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2016-11-05 16:15:27 +03:00
|
|
|
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
|
2017-10-28 13:43:30 +03:00
|
|
|
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
|
2023-08-17 17:11:17 +03:00
|
|
|
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
|
2023-03-07 09:02:03 +03:00
|
|
|
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
|
2009-01-12 04:43:23 +03:00
|
|
|
|
2017-04-09 07:01:07 +03:00
|
|
|
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
|
|
|
|
|
|
|
|
#define rb_vm_register_special_exception(sp, e, m) \
|
|
|
|
rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
|
2014-09-11 14:53:48 +04:00
|
|
|
|
2024-02-24 11:32:17 +03:00
|
|
|
void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
|
2007-11-20 13:47:53 +03:00
|
|
|
|
2015-06-03 22:12:26 +03:00
|
|
|
void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
|
2015-02-22 10:05:14 +03:00
|
|
|
|
2023-03-07 09:02:03 +03:00
|
|
|
const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
|
2015-06-02 07:20:30 +03:00
|
|
|
|
2008-06-15 13:17:06 +04:00
|
|
|
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
|
2007-07-02 03:57:04 +04:00
|
|
|
|
2020-07-06 09:39:26 +03:00
|
|
|
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
|
|
|
|
STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
|
|
|
|
STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
|
|
|
|
const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
|
|
|
|
if (UNLIKELY((cfp) <= &bound[1])) { \
|
|
|
|
vm_stackoverflow(); \
|
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
2013-11-18 06:29:58 +04:00
|
|
|
#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
|
2020-07-06 09:39:26 +03:00
|
|
|
CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
|
2012-12-25 13:57:07 +04:00
|
|
|
|
2017-06-23 11:24:54 +03:00
|
|
|
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
|
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
|
|
|
|
|
2006-12-31 18:02:22 +03:00
|
|
|
/* for thread */
|
|
|
|
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
#if RUBY_VM_THREAD_MODEL == 2
|
2017-10-21 09:22:43 +03:00
|
|
|
|
2020-12-06 18:07:30 +03:00
|
|
|
RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
|
2018-08-07 19:27:45 +03:00
|
|
|
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
|
|
|
|
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
|
2018-11-26 21:16:39 +03:00
|
|
|
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
|
|
|
|
RUBY_EXTERN unsigned int ruby_vm_event_local_num;
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
|
2017-11-07 09:01:16 +03:00
|
|
|
#define GET_VM() rb_current_vm()
|
2020-03-09 20:22:11 +03:00
|
|
|
#define GET_RACTOR() rb_current_ractor()
|
2017-11-07 09:01:16 +03:00
|
|
|
#define GET_THREAD() rb_current_thread()
|
2021-06-01 10:15:51 +03:00
|
|
|
#define GET_EC() rb_current_execution_context(true)
|
2017-10-26 11:32:49 +03:00
|
|
|
|
|
|
|
/* Return the rb_thread_t that owns execution context `ec`.
 * This is the back-pointer stored on the EC; it can be NULL for an EC
 * not attached to a thread — callers such as rb_ec_vm_ptr() check. */
static inline rb_thread_t *
|
|
|
|
rb_ec_thread_ptr(const rb_execution_context_t *ec)
|
|
|
|
{
|
2017-10-29 15:57:04 +03:00
|
|
|
    return ec->thread_ptr;
|
2017-10-26 11:32:49 +03:00
|
|
|
}
|
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
/* Return the ractor that `ec`'s thread belongs to, or NULL when the
 * EC has no thread attached. An attached thread must always have a
 * ractor, hence the VM_ASSERT. */
static inline rb_ractor_t *
|
|
|
|
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
|
|
|
|
{
|
|
|
|
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
|
|
|
|
    if (th) {
|
|
|
|
        VM_ASSERT(th->ractor != NULL);
|
|
|
|
        return th->ractor;
|
|
|
|
    }
|
|
|
|
    else {
|
|
|
|
        return NULL;
|
|
|
|
    }
|
|
|
|
}
|
|
|
|
|
2017-10-26 11:32:49 +03:00
|
|
|
/* Return the VM that `ec`'s thread belongs to, or NULL when the EC is
 * not attached to any thread. */
static inline rb_vm_t *
|
|
|
|
rb_ec_vm_ptr(const rb_execution_context_t *ec)
|
|
|
|
{
|
2017-10-29 15:57:04 +03:00
|
|
|
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
|
2017-10-26 11:32:49 +03:00
|
|
|
    if (th) {
|
2017-10-29 15:57:04 +03:00
|
|
|
        return th->vm;
|
2017-10-26 11:32:49 +03:00
|
|
|
    }
|
|
|
|
    else {
|
|
|
|
        return NULL;
|
|
|
|
    }
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline rb_execution_context_t *
|
2021-06-01 10:15:51 +03:00
|
|
|
rb_current_execution_context(bool expect_ec)
|
2017-10-26 11:32:49 +03:00
|
|
|
{
|
2020-10-19 10:47:32 +03:00
|
|
|
#ifdef RB_THREAD_LOCAL_SPECIFIER
|
2021-04-29 15:31:05 +03:00
|
|
|
#ifdef __APPLE__
|
2020-10-19 10:47:32 +03:00
|
|
|
rb_execution_context_t *ec = rb_current_ec();
|
|
|
|
#else
|
|
|
|
rb_execution_context_t *ec = ruby_current_ec;
|
|
|
|
#endif
|
2023-04-10 04:53:13 +03:00
|
|
|
|
|
|
|
/* On the shared objects, `__tls_get_addr()` is used to access the TLS
|
|
|
|
* and the address of the `ruby_current_ec` can be stored on a function
|
|
|
|
* frame. However, this address can be mis-used after native thread
|
|
|
|
* migration of a coroutine.
|
|
|
|
 * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
|
|
|
|
* 2) Context switch and resume it on the NT2.
|
|
|
|
* 3) `ptr` is used on NT2 but it accesses to the TLS on NT1.
|
|
|
|
* This assertion checks such misusage.
|
|
|
|
*
|
|
|
|
* To avoid accidents, `GET_EC()` should be called once on the frame.
|
|
|
|
* Note that inlining can produce the problem.
|
|
|
|
*/
|
|
|
|
VM_ASSERT(ec == rb_current_ec_noinline());
|
2020-10-19 10:47:32 +03:00
|
|
|
#else
|
2020-03-09 20:22:11 +03:00
|
|
|
rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
|
2020-10-19 10:47:32 +03:00
|
|
|
#endif
|
2021-06-01 10:15:51 +03:00
|
|
|
VM_ASSERT(!expect_ec || ec != NULL);
|
2020-03-09 20:22:11 +03:00
|
|
|
return ec;
|
2017-10-26 11:32:49 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the Ruby thread running on the current native thread,
 * derived from the thread-local execution context (GET_EC()). */
static inline rb_thread_t *
|
2017-11-07 09:01:16 +03:00
|
|
|
rb_current_thread(void)
|
2017-10-26 11:32:49 +03:00
|
|
|
{
|
|
|
|
    const rb_execution_context_t *ec = GET_EC();
|
|
|
|
    return rb_ec_thread_ptr(ec);
|
|
|
|
}
|
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
/* Return the current ractor. Fast path: while only the main ractor
 * exists (ruby_single_main_ractor is set), return it without reading
 * the thread-local EC. Otherwise derive it from the current EC.
 * `expect`: when true the caller requires a current EC to exist;
 * when false, NULL is returned if there is no current EC. */
static inline rb_ractor_t *
|
2023-03-29 20:50:51 +03:00
|
|
|
rb_current_ractor_raw(bool expect)
|
2020-03-09 20:22:11 +03:00
|
|
|
{
|
2020-12-01 21:37:56 +03:00
|
|
|
    if (ruby_single_main_ractor) {
|
|
|
|
        return ruby_single_main_ractor;
|
|
|
|
    }
|
|
|
|
    else {
|
2023-03-29 20:50:51 +03:00
|
|
|
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
|
|
|
|
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
|
2020-12-01 21:37:56 +03:00
|
|
|
    }
|
2020-03-09 20:22:11 +03:00
|
|
|
}
|
|
|
|
|
2023-03-29 20:50:51 +03:00
|
|
|
/* Return the current ractor; a current EC (and thus ractor) is
 * required to exist (expect == true). */
static inline rb_ractor_t *
|
|
|
|
rb_current_ractor(void)
|
|
|
|
{
|
|
|
|
    return rb_current_ractor_raw(true);
|
|
|
|
}
|
|
|
|
|
2017-10-26 11:32:49 +03:00
|
|
|
static inline rb_vm_t *
|
2017-11-07 09:01:16 +03:00
|
|
|
rb_current_vm(void)
|
2017-10-26 11:32:49 +03:00
|
|
|
{
|
2020-03-09 20:22:11 +03:00
|
|
|
#if 0 // TODO: reconsider the assertions
|
2017-10-26 11:32:49 +03:00
|
|
|
VM_ASSERT(ruby_current_vm_ptr == NULL ||
|
|
|
|
ruby_current_execution_context_ptr == NULL ||
|
|
|
|
rb_ec_thread_ptr(GET_EC()) == NULL ||
|
2020-08-21 22:52:02 +03:00
|
|
|
rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
|
2017-10-26 11:32:49 +03:00
|
|
|
rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
|
2020-03-09 20:22:11 +03:00
|
|
|
#endif
|
2018-11-18 04:37:41 +03:00
|
|
|
|
2020-03-09 20:22:11 +03:00
|
|
|
return ruby_current_vm_ptr;
|
2018-11-18 04:37:41 +03:00
|
|
|
}
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
|
2020-10-14 08:21:57 +03:00
|
|
|
void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
|
|
|
|
unsigned int recorded_lock_rec,
|
|
|
|
unsigned int current_lock_rec);
|
|
|
|
|
|
|
|
/* Return the recursive VM-lock count held by `ec`'s ractor, or 0 when
 * some other ractor (or nobody) currently owns the VM lock. */
static inline unsigned int
|
|
|
|
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
|
2020-10-13 20:03:21 +03:00
|
|
|
{
|
|
|
|
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
|
|
|
|
|
|
|
|
    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
|
|
|
|
        return 0;
|
|
|
|
    }
|
|
|
|
    else {
|
|
|
|
        return vm->ractor.sync.lock_rec;
|
|
|
|
    }
|
|
|
|
}
|
|
|
|
|
* this commit is a result of refactoring. only renaming functions,
moving definitions place, add/remove prototypes, deleting
unused variables and removing yarv.h.
This commit doesn't change any behavior of ruby/vm.
* yarv.h, common.mk: remove yarv.h (contents are moved to yarvcore.h).
* error.c, eval_intern.h: include yarvcore.h instead yarv.h
* rename some functions:
* debug.[ch]: debug_*() -> ruby_debug_*()
* iseq.c: iseq_*() -> rb_iseq_*(), ruby_iseq_disasm()
* iseq.c: node_name() -> ruby_node_name()
* vm.c: yarv_check_redefinition_opt_method() ->
rb_vm_check_redefinition_opt_method()
* some refactoring with checking -Wall.
* array.c: remove rb_ary_ptr() (unused) and remove unused
local variables.
* object.c: add a prototype of rb_mod_module_exec().
* eval_intern.h (ruby_cref): set it inline.
* eval_load.c (rb_load), yarvcore.c: yarv_load() -> rb_load_internal().
* parse.y: add a prototype of rb_parse_in_eval() (in eval.c).
* process.c: add a prototype of rb_thread_stop_timer_thread() (in thread.c).
* thread.c: remove raw_gets() function (unused) and fix some format
mismatch (format mismatchs have remained yet. this is todo).
* thread.c (rb_thread_wait_fd_rw): fix typo on label name.
* thread_pthread.ci: comment out codes with USE_THREAD_CACHE.
* vm.c (rb_svar, rb_backref_get, rb_backref_get,
rb_lastline_get, rb_lastline_set) : moved from yarvcore.c.
* vm.c (yarv_init_redefined_flag): add a prototype and rename
yarv_opt_method_table to vm_opt_method_table.
* vm.c (rb_thread_eval): moved from yarvcore.c.
* yarvcore.c: remove unused global variables and fix to use nsdr().
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@11652 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2007-02-07 04:25:05 +03:00
|
|
|
#else
|
|
|
|
#error "unsupported thread model"
|
|
|
|
#endif
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2012-11-26 16:17:10 +04:00
|
|
|
enum {
|
2013-05-27 01:30:44 +04:00
|
|
|
TIMER_INTERRUPT_MASK = 0x01,
|
|
|
|
PENDING_INTERRUPT_MASK = 0x02,
|
|
|
|
POSTPONED_JOB_INTERRUPT_MASK = 0x04,
|
2020-03-09 20:22:11 +03:00
|
|
|
TRAP_INTERRUPT_MASK = 0x08,
|
|
|
|
TERMINATE_INTERRUPT_MASK = 0x10,
|
|
|
|
VM_BARRIER_INTERRUPT_MASK = 0x20,
|
2012-11-26 16:17:10 +04:00
|
|
|
};
|
|
|
|
|
2017-11-06 10:44:28 +03:00
|
|
|
#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
|
|
|
|
#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
|
|
|
|
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
|
|
|
|
#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
|
2020-03-09 20:22:11 +03:00
|
|
|
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
|
|
|
|
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
|
2017-11-06 10:44:28 +03:00
|
|
|
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
|
|
|
|
(PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
|
2020-11-11 08:37:31 +03:00
|
|
|
|
|
|
|
/* Return true if any unmasked interrupt is pending on `ec`.
 * With USE_VM_CLOCK, first compare the VM-global clock against the
 * EC's last-checked value and set a timer interrupt when it advanced,
 * converting clock ticks into interrupt flags at the check site. */
static inline bool
|
|
|
|
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
|
|
|
|
{
|
2021-04-29 15:31:05 +03:00
|
|
|
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
|
2020-11-11 08:37:31 +03:00
|
|
|
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
|
|
|
|
|
|
|
|
    if (current_clock != ec->checked_clock) {
|
|
|
|
        ec->checked_clock = current_clock;
|
|
|
|
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
|
|
|
|
    }
|
|
|
|
#endif
|
|
|
|
    return ec->interrupt_flag & ~(ec)->interrupt_mask;
|
|
|
|
}
|
2007-12-25 07:16:06 +03:00
|
|
|
|
2016-05-29 13:18:20 +03:00
|
|
|
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
|
2010-10-10 00:33:21 +04:00
|
|
|
int rb_signal_buff_size(void);
|
2018-08-18 12:07:36 +03:00
|
|
|
int rb_signal_exec(rb_thread_t *th, int sig);
|
2010-10-06 04:08:44 +04:00
|
|
|
void rb_threadptr_check_signal(rb_thread_t *mth);
|
2009-06-08 20:14:06 +04:00
|
|
|
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
|
|
|
|
void rb_threadptr_signal_exit(rb_thread_t *th);
|
2018-08-18 12:07:36 +03:00
|
|
|
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
|
2011-06-18 12:26:19 +04:00
|
|
|
void rb_threadptr_interrupt(rb_thread_t *th);
|
2011-07-08 08:40:01 +04:00
|
|
|
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
|
2012-12-23 14:18:58 +04:00
|
|
|
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
|
|
|
|
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
|
2019-12-16 03:44:01 +03:00
|
|
|
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
|
2017-10-29 17:06:58 +03:00
|
|
|
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
|
2022-12-01 13:00:33 +03:00
|
|
|
void rb_execution_context_update(rb_execution_context_t *ec);
|
2017-09-10 22:00:08 +03:00
|
|
|
void rb_execution_context_mark(const rb_execution_context_t *ec);
|
2017-10-26 11:32:49 +03:00
|
|
|
void rb_fiber_close(rb_fiber_t *fib);
|
|
|
|
void Init_native_thread(rb_thread_t *th);
|
2020-09-14 04:30:22 +03:00
|
|
|
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2020-09-11 12:30:27 +03:00
|
|
|
// vm_sync.h
|
|
|
|
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
|
|
|
|
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
|
|
|
|
|
2017-11-07 09:01:16 +03:00
|
|
|
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
|
2015-07-17 10:28:53 +03:00
|
|
|
static inline void
|
2017-11-07 09:01:16 +03:00
|
|
|
rb_vm_check_ints(rb_execution_context_t *ec)
|
2015-07-17 10:28:53 +03:00
|
|
|
{
|
2022-05-16 15:50:02 +03:00
|
|
|
#ifdef RUBY_ASSERT_CRITICAL_SECTION
|
|
|
|
VM_ASSERT(ruby_assert_critical_section_entered == 0);
|
|
|
|
#endif
|
|
|
|
|
2017-11-07 09:01:16 +03:00
|
|
|
VM_ASSERT(ec == GET_EC());
|
2022-05-16 15:50:02 +03:00
|
|
|
|
2017-11-06 10:44:28 +03:00
|
|
|
if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
|
2022-05-16 15:50:02 +03:00
|
|
|
rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
|
2015-07-17 10:28:53 +03:00
|
|
|
}
|
|
|
|
}
|
2006-12-31 18:02:22 +03:00
|
|
|
|
2007-07-02 16:49:35 +04:00
|
|
|
/* tracer */
|
2018-11-26 21:16:39 +03:00
|
|
|
|
2012-12-21 13:48:15 +04:00
|
|
|
struct rb_trace_arg_struct {
|
2012-11-20 13:48:24 +04:00
|
|
|
rb_event_flag_t event;
|
2017-10-29 16:17:37 +03:00
|
|
|
rb_execution_context_t *ec;
|
|
|
|
const rb_control_frame_t *cfp;
|
2012-11-20 13:48:24 +04:00
|
|
|
VALUE self;
|
|
|
|
ID id;
|
* vm_trace.c (tracepoint_attr_callee_id, rb_tracearg_callee_id):
add TracePoint#callee_id. [ruby-core:77241] [Feature #12747]
* cont.c, eval.c, gc.c, include/ruby/intern.h, insns.def, thread.c,
vm.c, vm_backtrace.c, vm_core.h, vm_eval.c, vm_insnhelper.c, vm_trace.c: ditto.
* test/ruby/test_settracefunc.rb: tests for above.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@56593 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2016-11-05 16:15:27 +03:00
|
|
|
ID called_id;
|
2012-11-20 13:48:24 +04:00
|
|
|
VALUE klass;
|
|
|
|
VALUE data;
|
2012-11-27 03:01:45 +04:00
|
|
|
|
|
|
|
int klass_solved;
|
|
|
|
|
|
|
|
/* calc from cfp */
|
2012-11-27 03:25:21 +04:00
|
|
|
int lineno;
|
|
|
|
VALUE path;
|
2012-12-21 13:48:15 +04:00
|
|
|
};
|
2012-11-20 13:48:24 +04:00
|
|
|
|
2018-11-26 21:16:39 +03:00
|
|
|
void rb_hook_list_mark(rb_hook_list_t *hooks);
|
2023-01-19 22:47:17 +03:00
|
|
|
void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
|
2018-11-26 21:16:39 +03:00
|
|
|
void rb_hook_list_free(rb_hook_list_t *hooks);
|
2018-11-26 23:16:14 +03:00
|
|
|
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
|
2018-11-26 21:16:39 +03:00
|
|
|
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
|
2012-11-20 13:48:24 +04:00
|
|
|
|
2018-11-26 21:16:39 +03:00
|
|
|
void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
|
|
|
|
|
|
|
|
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
|
2015-07-17 10:28:22 +03:00
|
|
|
const rb_event_flag_t flag_arg_ = (flag_); \
|
2018-11-26 21:16:39 +03:00
|
|
|
rb_hook_list_t *hooks_arg_ = (hooks_); \
|
|
|
|
if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
|
|
|
|
/* defer evaluating the other arguments */ \
|
2018-11-26 21:16:54 +03:00
|
|
|
rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
|
2007-07-02 16:49:35 +04:00
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
2015-07-17 10:28:22 +03:00
|
|
|
static inline void
|
2018-11-26 21:16:39 +03:00
|
|
|
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
|
|
|
|
VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
|
2015-07-17 10:28:22 +03:00
|
|
|
{
|
2017-11-17 09:59:22 +03:00
|
|
|
struct rb_trace_arg_struct trace_arg;
|
|
|
|
|
2018-11-26 21:16:39 +03:00
|
|
|
VM_ASSERT((hooks->events & flag) != 0);
|
2017-11-17 09:59:22 +03:00
|
|
|
|
|
|
|
trace_arg.event = flag;
|
|
|
|
trace_arg.ec = ec;
|
|
|
|
trace_arg.cfp = ec->cfp;
|
|
|
|
trace_arg.self = self;
|
|
|
|
trace_arg.id = id;
|
|
|
|
trace_arg.called_id = called_id;
|
|
|
|
trace_arg.klass = klass;
|
|
|
|
trace_arg.data = data;
|
|
|
|
trace_arg.path = Qundef;
|
|
|
|
trace_arg.klass_solved = 0;
|
2018-11-26 21:16:39 +03:00
|
|
|
|
|
|
|
rb_exec_event_hooks(&trace_arg, hooks, pop_p);
|
|
|
|
}
|
|
|
|
|
2020-12-19 19:44:41 +03:00
|
|
|
struct rb_ractor_pub {
|
|
|
|
VALUE self;
|
|
|
|
uint32_t id;
|
|
|
|
rb_hook_list_t hooks;
|
|
|
|
};
|
2020-12-19 00:38:58 +03:00
|
|
|
|
2018-11-26 21:16:39 +03:00
|
|
|
/* Return the event-hook list of the current ractor. The ractor is
 * cast to its public header struct (struct rb_ractor_pub) so the
 * hooks field can be reached without the full ractor definition. */
static inline rb_hook_list_t *
|
2020-12-19 00:38:58 +03:00
|
|
|
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
|
2018-11-26 21:16:39 +03:00
|
|
|
{
|
2020-12-19 19:44:41 +03:00
|
|
|
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
|
|
|
|
    return &cr_pub->hooks;
|
2015-07-17 10:28:22 +03:00
|
|
|
}
|
|
|
|
|
2017-10-29 16:19:14 +03:00
|
|
|
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
|
2020-12-19 00:38:58 +03:00
|
|
|
EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
|
2017-11-17 09:59:22 +03:00
|
|
|
|
2017-10-29 16:19:14 +03:00
|
|
|
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
|
2020-12-19 00:38:58 +03:00
|
|
|
EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
|
2012-12-25 17:24:17 +04:00
|
|
|
|
2018-12-27 20:39:17 +03:00
|
|
|
/* Fire the :script_compiled event for `iseq`. The hook data is the
 * iseq alone when there is no eval source string, otherwise a 2-tuple
 * [eval_script, iseq]. */
static inline void
|
|
|
|
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
|
|
|
|
{
|
|
|
|
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
|
2018-12-27 20:39:21 +03:00
|
|
|
                    NIL_P(eval_script) ? (VALUE)iseq :
|
2018-12-27 20:39:17 +03:00
|
|
|
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
|
|
|
|
}
|
|
|
|
|
2019-08-12 11:44:30 +03:00
|
|
|
void rb_vm_trap_exit(rb_vm_t *vm);
|
Change the semantics of rb_postponed_job_register
Our current implementation of rb_postponed_job_register suffers from
some safety issues that can lead to interpreter crashes (see bug #1991).
Essentially, the issue is that jobs can be called with the wrong
arguments.
We made two attempts to fix this whilst keeping the promised semantics,
but:
* The first one involved masking/unmasking when flushing jobs, which
was believed to be too expensive
* The second one involved a lock-free, multi-producer, single-consumer
ringbuffer, which was too complex
The critical insight behind this third solution is that essentially the
only user of these APIs are a) internal, or b) profiling gems.
For a), none of the usages actually require variable data; they will
work just fine with the preregistration interface.
For b), generally profiling gems only call a single callback with a
single piece of data (which is actually usually just zero) for the life
of the program. The ringbuffer is complex because it needs to support
multi-word inserts of job & data (which can't be atomic); but nobody
actually even needs that functionality, really.
So, this commit:
* Introduces a pre-registration API for jobs, with a GVL-requiring
rb_postponed_job_preregister, which returns a handle which can be
used with an async-signal-safe rb_postponed_job_trigger.
* Deprecates rb_postponed_job_register (and re-implements it on top of
the preregister function for compatibility)
* Moves all the internal usages of postponed job register
pre-registration
2023-11-19 14:54:57 +03:00
|
|
|
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
|
|
|
|
void rb_vm_postponed_job_free(void); /* vm_trace.c */
|
|
|
|
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
|
|
|
|
void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
|
2019-08-12 11:44:30 +03:00
|
|
|
|
2013-04-05 14:29:38 +04:00
|
|
|
RUBY_SYMBOL_EXPORT_BEGIN
|
2010-07-28 16:28:03 +04:00
|
|
|
|
|
|
|
int rb_thread_check_trap_pending(void);
|
|
|
|
|
2017-12-06 10:19:17 +03:00
|
|
|
/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
|
2017-12-20 07:24:14 +03:00
|
|
|
#define RUBY_EVENT_COVERAGE_LINE 0x010000
|
|
|
|
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
|
2017-12-06 10:19:17 +03:00
|
|
|
|
2010-08-14 09:58:19 +04:00
|
|
|
extern VALUE rb_get_coverages(void);
|
2017-12-05 10:16:42 +03:00
|
|
|
extern void rb_set_coverages(VALUE, int, VALUE);
|
2018-10-20 08:33:04 +03:00
|
|
|
extern void rb_clear_coverages(void);
|
2010-08-14 09:58:19 +04:00
|
|
|
extern void rb_reset_coverages(void);
|
2021-10-25 14:00:51 +03:00
|
|
|
extern void rb_resume_coverages(void);
|
|
|
|
extern void rb_suspend_coverages(void);
|
2010-08-14 09:58:19 +04:00
|
|
|
|
2013-05-27 01:30:44 +04:00
|
|
|
void rb_postponed_job_flush(rb_vm_t *vm);
|
|
|
|
|
2020-12-21 17:28:05 +03:00
|
|
|
// ractor.c
|
|
|
|
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
|
|
|
|
RUBY_EXTERN VALUE rb_eRactorIsolationError;
|
2020-11-30 10:18:43 +03:00
|
|
|
|
2013-04-05 14:29:38 +04:00
|
|
|
RUBY_SYMBOL_EXPORT_END
|
2010-07-28 16:28:03 +04:00
|
|
|
|
2008-01-18 11:56:11 +03:00
|
|
|
#endif /* RUBY_VM_CORE_H */
|