ruby/variable.c

/**********************************************************************
variable.c -
$Author$
created at: Tue Apr 19 23:55:15 JST 1994
Copyright (C) 1993-2007 Yukihiro Matsumoto
Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
Copyright (C) 2000 Information-technology Promotion Agency, Japan
**********************************************************************/
#include "ruby/internal/config.h"
#include <stddef.h>
#include "ruby/internal/stdbool.h"
#include "ccan/list/list.h"
#include "constant.h"
#include "debug_counter.h"
#include "id.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compilers.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/hash.h"
#include "internal/object.h"
#include "internal/re.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "ruby/encoding.h"
#include "ruby/st.h"
#include "ruby/util.h"
#include "shape.h"
#include "symbol.h"
#include "variable.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_sync.h"
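// Serial number bumped whenever cached class-variable lookups must be
// invalidated (e.g. when a class variable definition changes).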
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
typedef void rb_gvar_compact_t(void *var);
static struct rb_id_table *rb_global_tbl;
static ID autoload;
// This hash table maps file paths to loadable features. We use this to track
// autoload state until it's no longer needed.
// feature (file path) => struct autoload_data
static VALUE autoload_features;
// This mutex is used to protect autoloading state. We use a global mutex which
// is held until a per-feature mutex can be created. This ensures there are no
// race conditions relating to autoload state.
static VALUE autoload_mutex;
static void check_before_mod_set(VALUE, ID, VALUE, const char *);
static void setup_const_entry(rb_const_entry_t *, VALUE, VALUE, rb_const_flag_t);
static VALUE rb_const_search(VALUE klass, ID id, int exclude, int recurse, int visibility);
static st_table *generic_iv_tbl_;
void
Init_var_tables(void)
{
rb_global_tbl = rb_id_table_create(0);
generic_iv_tbl_ = st_init_numtable();
autoload = rb_intern_const("__autoload__");
autoload_mutex = rb_mutex_new();
rb_obj_hide(autoload_mutex);
rb_vm_register_global_object(autoload_mutex);
autoload_features = rb_ident_hash_new();
rb_obj_hide(autoload_features);
rb_vm_register_global_object(autoload_features);
}
static inline bool
rb_namespace_p(VALUE obj)
{
if (RB_SPECIAL_CONST_P(obj)) return false;
switch (RB_BUILTIN_TYPE(obj)) {
case T_MODULE: case T_CLASS: return true;
default: break;
}
return false;
}
/**
* Returns +classpath+ of _klass_, if it is named, or +nil+ for
* anonymous +class+/+module+. A named +classpath+ may contain
* an anonymous component, but the last component is guaranteed
* to not be anonymous. <code>*permanent</code> is set to 1
* if +classpath+ has no anonymous components. There is no builtin
* Ruby-level API that can change a permanent +classpath+.
*
* YJIT needs this function to not allocate.
*/
static VALUE
classname(VALUE klass, bool *permanent)
{
*permanent = false;
VALUE classpath = RCLASS_EXT(klass)->classpath;
if (classpath == 0) return Qnil;
*permanent = RCLASS_EXT(klass)->permanent_classpath;
return classpath;
}
VALUE
rb_mod_name0(VALUE klass, bool *permanent)
{
return classname(klass, permanent);
}
/*
* call-seq:
* mod.name -> string or nil
*
* Returns the name of the module <i>mod</i>. Returns +nil+ for anonymous modules.
*/
VALUE
rb_mod_name(VALUE mod)
{
// YJIT needs this function to not allocate.
bool permanent;
return classname(mod, &permanent);
}
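/* Illustrative sketch (not compiled): how C code might use rb_mod_name and
 * fall back for anonymous modules. `module_display_name` and `mod` are
 * hypothetical. */
#if 0
static VALUE
module_display_name(VALUE mod)
{
    VALUE name = rb_mod_name(mod);          /* Qnil for anonymous modules */
    return NIL_P(name) ? rb_class_path(mod) /* e.g. "#<Module:0x...>"     */
                       : name;
}
#endif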
// Similar to logic in rb_mod_const_get().
static bool
is_constant_path(VALUE name)
{
const char *path = RSTRING_PTR(name);
const char *pend = RSTRING_END(name);
rb_encoding *enc = rb_enc_get(name);
const char *p = path;
if (p >= pend || !*p) {
return false;
}
while (p < pend) {
if (p + 2 <= pend && p[0] == ':' && p[1] == ':') {
p += 2;
}
const char *pbeg = p;
while (p < pend && *p != ':') p++;
if (pbeg == p) return false;
if (rb_enc_symname_type(pbeg, p - pbeg, enc, 0) != ID_CONST) {
return false;
}
}
return true;
}
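/* e.g. "Foo", "Foo::Bar" and "::Foo" => true;
 * "foo", "Foo::", "Foo:Bar" and "" => false. */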
/*
* call-seq:
* mod.set_temporary_name(string) -> self
* mod.set_temporary_name(nil) -> self
*
* Sets the temporary name of the module. This name is reflected in
* introspection of the module and the values that are related to it, such
* as instances, constants, and methods.
*
* The name should be +nil+ or a non-empty string that is not a valid constant
* path (to avoid confusion between permanent and temporary names).
*
* The method can be useful to distinguish dynamically generated classes and
* modules without assigning them to constants.
*
* If the module is given a permanent name by assigning it to a constant,
* the temporary name is discarded. A temporary name can't be assigned to
* modules that have a permanent name.
*
* If the given name is +nil+, the module becomes anonymous again.
*
* Example:
*
* m = Module.new # => #<Module:0x0000000102c68f38>
* m.name #=> nil
*
* m.set_temporary_name("fake_name") # => fake_name
* m.name #=> "fake_name"
*
* m.set_temporary_name(nil) # => #<Module:0x0000000102c68f38>
* m.name #=> nil
*
* c = Class.new
* c.set_temporary_name("MyClass(with description)")
*
* c.new # => #<MyClass(with description):0x0....>
*
* c::M = m
* c::M.name #=> "MyClass(with description)::M"
*
* # Assigning to a constant replaces the name with a permanent one
* C = c
*
* C.name #=> "C"
* C::M.name #=> "C::M"
* c.new # => #<C:0x0....>
*/
VALUE
rb_mod_set_temporary_name(VALUE mod, VALUE name)
{
// We don't allow setting the name if the classpath is already permanent:
if (RCLASS_EXT(mod)->permanent_classpath) {
rb_raise(rb_eRuntimeError, "can't change permanent name");
}
if (NIL_P(name)) {
// Set the temporary classpath to NULL (anonymous):
RCLASS_SET_CLASSPATH(mod, 0, FALSE);
}
else {
// Ensure the name is a string:
StringValue(name);
if (RSTRING_LEN(name) == 0) {
rb_raise(rb_eArgError, "empty class/module name");
}
if (is_constant_path(name)) {
rb_raise(rb_eArgError, "the temporary name must not be a constant path to avoid confusion");
}
// Set the temporary classpath to the given name:
RCLASS_SET_CLASSPATH(mod, name, FALSE);
}
return mod;
}
static VALUE
make_temporary_path(VALUE obj, VALUE klass)
{
VALUE path;
switch (klass) {
case Qnil:
path = rb_sprintf("#<Class:%p>", (void*)obj);
break;
case Qfalse:
path = rb_sprintf("#<Module:%p>", (void*)obj);
break;
default:
path = rb_sprintf("#<%"PRIsVALUE":%p>", klass, (void*)obj);
break;
}
OBJ_FREEZE(path);
return path;
}
typedef VALUE (*fallback_func)(VALUE obj, VALUE name);
static VALUE
rb_tmp_class_path(VALUE klass, bool *permanent, fallback_func fallback)
{
VALUE path = classname(klass, permanent);
if (!NIL_P(path)) {
return path;
}
if (RB_TYPE_P(klass, T_MODULE)) {
if (rb_obj_class(klass) == rb_cModule) {
path = Qfalse;
}
else {
bool perm;
path = rb_tmp_class_path(RBASIC(klass)->klass, &perm, fallback);
}
}
*permanent = false;
return fallback(klass, path);
}
VALUE
rb_class_path(VALUE klass)
{
bool permanent;
VALUE path = rb_tmp_class_path(klass, &permanent, make_temporary_path);
if (!NIL_P(path)) path = rb_str_dup(path);
return path;
}
VALUE
rb_class_path_cached(VALUE klass)
{
return rb_mod_name(klass);
}
static VALUE
no_fallback(VALUE obj, VALUE name)
{
return name;
}
VALUE
rb_search_class_path(VALUE klass)
{
bool permanent;
return rb_tmp_class_path(klass, &permanent, no_fallback);
}
static VALUE
build_const_pathname(VALUE head, VALUE tail)
{
VALUE path = rb_str_dup(head);
rb_str_cat2(path, "::");
rb_str_append(path, tail);
return rb_fstring(path);
}
static VALUE
build_const_path(VALUE head, ID tail)
{
return build_const_pathname(head, rb_id2str(tail));
}
void
rb_set_class_path_string(VALUE klass, VALUE under, VALUE name)
{
bool permanent = true;
VALUE str;
if (under == rb_cObject) {
str = rb_str_new_frozen(name);
}
else {
str = rb_tmp_class_path(under, &permanent, make_temporary_path);
str = build_const_pathname(str, name);
}
RCLASS_SET_CLASSPATH(klass, str, permanent);
}
void
rb_set_class_path(VALUE klass, VALUE under, const char *name)
{
VALUE str = rb_str_new2(name);
OBJ_FREEZE(str);
rb_set_class_path_string(klass, under, str);
}
VALUE
rb_path_to_class(VALUE pathname)
{
rb_encoding *enc = rb_enc_get(pathname);
const char *pbeg, *pend, *p, *path = RSTRING_PTR(pathname);
ID id;
VALUE c = rb_cObject;
if (!rb_enc_asciicompat(enc)) {
rb_raise(rb_eArgError, "invalid class path encoding (non ASCII)");
}
pbeg = p = path;
pend = path + RSTRING_LEN(pathname);
if (path == pend || path[0] == '#') {
rb_raise(rb_eArgError, "can't retrieve anonymous class %"PRIsVALUE,
QUOTE(pathname));
}
while (p < pend) {
while (p < pend && *p != ':') p++;
id = rb_check_id_cstr(pbeg, p-pbeg, enc);
if (p < pend && p[0] == ':') {
if ((size_t)(pend - p) < 2 || p[1] != ':') goto undefined_class;
p += 2;
pbeg = p;
}
if (!id) {
goto undefined_class;
}
c = rb_const_search(c, id, TRUE, FALSE, FALSE);
if (UNDEF_P(c)) goto undefined_class;
if (!rb_namespace_p(c)) {
rb_raise(rb_eTypeError, "%"PRIsVALUE" does not refer to class/module",
pathname);
}
}
RB_GC_GUARD(pathname);
return c;
undefined_class:
rb_raise(rb_eArgError, "undefined class/module % "PRIsVALUE,
rb_str_subseq(pathname, 0, p-path));
UNREACHABLE_RETURN(Qundef);
}
VALUE
rb_path2class(const char *path)
{
return rb_path_to_class(rb_str_new_cstr(path));
}
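/* Illustrative sketch (not compiled): resolving a constant path from C.
 * Both calls raise unless the constant is already defined and names a
 * class or module. */
#if 0
VALUE cmp  = rb_path2class("Comparable");   /* top-level module */
VALUE stat = rb_path2class("File::Stat");   /* nested class     */
#endif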
VALUE
rb_class_name(VALUE klass)
{
return rb_class_path(rb_class_real(klass));
}
const char *
rb_class2name(VALUE klass)
{
bool permanent;
VALUE path = rb_tmp_class_path(rb_class_real(klass), &permanent, make_temporary_path);
if (NIL_P(path)) return NULL;
return RSTRING_PTR(path);
}
const char *
rb_obj_classname(VALUE obj)
{
return rb_class2name(CLASS_OF(obj));
}
struct trace_var {
int removed;
void (*func)(VALUE arg, VALUE val);
VALUE data;
struct trace_var *next;
};
struct rb_global_variable {
int counter;
int block_trace;
VALUE *data;
rb_gvar_getter_t *getter;
rb_gvar_setter_t *setter;
rb_gvar_marker_t *marker;
rb_gvar_compact_t *compactor;
struct trace_var *trace;
};
struct rb_global_entry {
struct rb_global_variable *var;
ID id;
bool ractor_local;
};
static enum rb_id_table_iterator_result
free_global_entry_i(VALUE val, void *arg)
{
struct rb_global_entry *entry = (struct rb_global_entry *)val;
if (entry->var->counter == 1) {
ruby_xfree(entry->var);
}
else {
entry->var->counter--;
}
ruby_xfree(entry);
return ID_TABLE_DELETE;
}
void
rb_free_rb_global_tbl(void)
{
rb_id_table_foreach_values(rb_global_tbl, free_global_entry_i, 0);
rb_id_table_free(rb_global_tbl);
}
void
rb_free_generic_iv_tbl_(void)
{
st_free_table(generic_iv_tbl_);
}
static struct rb_global_entry*
rb_find_global_entry(ID id)
{
struct rb_global_entry *entry;
VALUE data;
if (!rb_id_table_lookup(rb_global_tbl, id, &data)) {
entry = NULL;
}
else {
entry = (struct rb_global_entry *)data;
RUBY_ASSERT(entry != NULL);
}
if (UNLIKELY(!rb_ractor_main_p()) && (!entry || !entry->ractor_local)) {
rb_raise(rb_eRactorIsolationError, "can not access global variables %s from non-main Ractors", rb_id2name(id));
}
return entry;
}
void
rb_gvar_ractor_local(const char *name)
{
struct rb_global_entry *entry = rb_find_global_entry(rb_intern(name));
entry->ractor_local = true;
}
static void
rb_gvar_undef_compactor(void *var)
{
}
static struct rb_global_entry*
rb_global_entry(ID id)
{
struct rb_global_entry *entry = rb_find_global_entry(id);
if (!entry) {
struct rb_global_variable *var;
entry = ALLOC(struct rb_global_entry);
var = ALLOC(struct rb_global_variable);
entry->id = id;
entry->var = var;
entry->ractor_local = false;
var->counter = 1;
var->data = 0;
var->getter = rb_gvar_undef_getter;
var->setter = rb_gvar_undef_setter;
var->marker = rb_gvar_undef_marker;
var->compactor = rb_gvar_undef_compactor;
var->block_trace = 0;
var->trace = 0;
rb_id_table_insert(rb_global_tbl, id, (VALUE)entry);
}
return entry;
}
VALUE
rb_gvar_undef_getter(ID id, VALUE *_)
{
rb_warning("global variable '%"PRIsVALUE"' not initialized", QUOTE_ID(id));
return Qnil;
}
static void
rb_gvar_val_compactor(void *_var)
{
struct rb_global_variable *var = (struct rb_global_variable *)_var;
VALUE obj = (VALUE)var->data;
if (obj) {
VALUE new = rb_gc_location(obj);
if (new != obj) {
var->data = (void*)new;
}
}
}
void
rb_gvar_undef_setter(VALUE val, ID id, VALUE *_)
{
struct rb_global_variable *var = rb_global_entry(id)->var;
var->getter = rb_gvar_val_getter;
var->setter = rb_gvar_val_setter;
var->marker = rb_gvar_val_marker;
var->compactor = rb_gvar_val_compactor;
var->data = (void*)val;
}
void
rb_gvar_undef_marker(VALUE *var)
{
}
VALUE
rb_gvar_val_getter(ID id, VALUE *data)
{
return (VALUE)data;
}
void
rb_gvar_val_setter(VALUE val, ID id, VALUE *_)
{
struct rb_global_variable *var = rb_global_entry(id)->var;
var->data = (void*)val;
}
void
rb_gvar_val_marker(VALUE *var)
{
VALUE data = (VALUE)var;
if (data) rb_gc_mark_movable(data);
}
VALUE
rb_gvar_var_getter(ID id, VALUE *var)
{
if (!var) return Qnil;
return *var;
}
void
rb_gvar_var_setter(VALUE val, ID id, VALUE *data)
{
*data = val;
}
void
rb_gvar_var_marker(VALUE *var)
{
if (var) rb_gc_mark_maybe(*var);
}
void
rb_gvar_readonly_setter(VALUE v, ID id, VALUE *_)
{
rb_name_error(id, "%"PRIsVALUE" is a read-only variable", QUOTE_ID(id));
}
static enum rb_id_table_iterator_result
mark_global_entry(VALUE v, void *ignored)
{
struct rb_global_entry *entry = (struct rb_global_entry *)v;
struct trace_var *trace;
struct rb_global_variable *var = entry->var;
(*var->marker)(var->data);
trace = var->trace;
while (trace) {
if (trace->data) rb_gc_mark_maybe(trace->data);
trace = trace->next;
}
return ID_TABLE_CONTINUE;
}
#define gc_mark_table(task) \
if (rb_global_tbl) { rb_id_table_foreach_values(rb_global_tbl, task##_global_entry, 0); }
void
rb_gc_mark_global_tbl(void)
{
gc_mark_table(mark);
}
static enum rb_id_table_iterator_result
update_global_entry(VALUE v, void *ignored)
{
struct rb_global_entry *entry = (struct rb_global_entry *)v;
struct rb_global_variable *var = entry->var;
(*var->compactor)(var);
return ID_TABLE_CONTINUE;
}
void
rb_gc_update_global_tbl(void)
{
gc_mark_table(update);
}
static ID
global_id(const char *name)
{
ID id;
if (name[0] == '$') id = rb_intern(name);
else {
size_t len = strlen(name);
VALUE vbuf = 0;
char *buf = ALLOCV_N(char, vbuf, len+1);
buf[0] = '$';
memcpy(buf+1, name, len);
id = rb_intern2(buf, len+1);
ALLOCV_END(vbuf);
}
return id;
}
static ID
find_global_id(const char *name)
{
ID id;
size_t len = strlen(name);
if (name[0] == '$') {
id = rb_check_id_cstr(name, len, NULL);
}
else {
VALUE vbuf = 0;
char *buf = ALLOCV_N(char, vbuf, len+1);
buf[0] = '$';
memcpy(buf+1, name, len);
id = rb_check_id_cstr(buf, len+1, NULL);
ALLOCV_END(vbuf);
}
return id;
}
void
rb_define_hooked_variable(
const char *name,
VALUE *var,
rb_gvar_getter_t *getter,
rb_gvar_setter_t *setter)
{
volatile VALUE tmp = var ? *var : Qnil;
ID id = global_id(name);
struct rb_global_variable *gvar = rb_global_entry(id)->var;
gvar->data = (void*)var;
gvar->getter = getter ? (rb_gvar_getter_t *)getter : rb_gvar_var_getter;
gvar->setter = setter ? (rb_gvar_setter_t *)setter : rb_gvar_var_setter;
gvar->marker = rb_gvar_var_marker;
RB_GC_GUARD(tmp);
}
void
rb_define_variable(const char *name, VALUE *var)
{
rb_define_hooked_variable(name, var, 0, 0);
}
void
rb_define_readonly_variable(const char *name, const VALUE *var)
{
rb_define_hooked_variable(name, (VALUE *)var, 0, rb_gvar_readonly_setter);
}
void
rb_define_virtual_variable(
const char *name,
rb_gvar_getter_t *getter,
rb_gvar_setter_t *setter)
{
if (!getter) getter = rb_gvar_val_getter;
if (!setter) setter = rb_gvar_readonly_setter;
rb_define_hooked_variable(name, 0, getter, setter);
}
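/* Illustrative sketch (not compiled): the two common ways a C extension
 * registers a Ruby global variable. `example_flag`, `example_now_getter`,
 * `Init_example` and the "$example_*" names are hypothetical. */
#if 0
static VALUE example_flag = Qnil;

static VALUE
example_now_getter(ID id, VALUE *data)
{
    return rb_funcall(rb_cTime, rb_intern("now"), 0);
}

static void
Init_example(void)
{
    /* backed by a C VALUE; marked for GC via rb_gvar_var_marker */
    rb_define_variable("$example_flag", &example_flag);

    /* computed on every read; read-only because no setter is given */
    rb_define_virtual_variable("$example_now", example_now_getter, NULL);
}
#endif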
static void
rb_trace_eval(VALUE cmd, VALUE val)
{
rb_eval_cmd_kw(cmd, rb_ary_new3(1, val), RB_NO_KEYWORDS);
}
VALUE
rb_f_trace_var(int argc, const VALUE *argv)
{
VALUE var, cmd;
struct rb_global_entry *entry;
struct trace_var *trace;
if (rb_scan_args(argc, argv, "11", &var, &cmd) == 1) {
cmd = rb_block_proc();
}
if (NIL_P(cmd)) {
return rb_f_untrace_var(argc, argv);
}
entry = rb_global_entry(rb_to_id(var));
trace = ALLOC(struct trace_var);
trace->next = entry->var->trace;
trace->func = rb_trace_eval;
trace->data = cmd;
trace->removed = 0;
entry->var->trace = trace;
return Qnil;
}
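/* Drop trace entries flagged as removed. The dummy head node `t` lets the
 * loop unlink the first entry the same way as any other entry. */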
static void
remove_trace(struct rb_global_variable *var)
{
struct trace_var *trace = var->trace;
struct trace_var t;
struct trace_var *next;
t.next = trace;
trace = &t;
while (trace->next) {
next = trace->next;
if (next->removed) {
trace->next = next->next;
xfree(next);
}
else {
trace = next;
}
}
var->trace = t.next;
}
VALUE
rb_f_untrace_var(int argc, const VALUE *argv)
{
VALUE var, cmd;
ID id;
struct rb_global_entry *entry;
struct trace_var *trace;
rb_scan_args(argc, argv, "11", &var, &cmd);
id = rb_check_id(&var);
if (!id) {
rb_name_error_str(var, "undefined global variable %"PRIsVALUE"", QUOTE(var));
}
if ((entry = rb_find_global_entry(id)) == NULL) {
rb_name_error(id, "undefined global variable %"PRIsVALUE"", QUOTE_ID(id));
}
trace = entry->var->trace;
if (NIL_P(cmd)) {
VALUE ary = rb_ary_new();
while (trace) {
struct trace_var *next = trace->next;
rb_ary_push(ary, (VALUE)trace->data);
trace->removed = 1;
trace = next;
}
if (!entry->var->block_trace) remove_trace(entry->var);
return ary;
}
else {
while (trace) {
if (trace->data == cmd) {
trace->removed = 1;
if (!entry->var->block_trace) remove_trace(entry->var);
return rb_ary_new3(1, cmd);
}
trace = trace->next;
}
}
return Qnil;
}
struct trace_data {
struct trace_var *trace;
VALUE val;
};
static VALUE
trace_ev(VALUE v)
{
struct trace_data *data = (void *)v;
struct trace_var *trace = data->trace;
while (trace) {
(*trace->func)(trace->data, data->val);
trace = trace->next;
}
return Qnil;
}
static VALUE
trace_en(VALUE v)
{
struct rb_global_variable *var = (void *)v;
var->block_trace = 0;
remove_trace(var);
return Qnil; /* not reached */
}
static VALUE
rb_gvar_set_entry(struct rb_global_entry *entry, VALUE val)
{
struct trace_data trace;
struct rb_global_variable *var = entry->var;
(*var->setter)(val, entry->id, var->data);
if (var->trace && !var->block_trace) {
var->block_trace = 1;
trace.trace = var->trace;
trace.val = val;
rb_ensure(trace_ev, (VALUE)&trace, trace_en, (VALUE)var);
}
return val;
}
VALUE
rb_gvar_set(ID id, VALUE val)
{
struct rb_global_entry *entry;
entry = rb_global_entry(id);
return rb_gvar_set_entry(entry, val);
}
VALUE
rb_gv_set(const char *name, VALUE val)
{
return rb_gvar_set(global_id(name), val);
}
VALUE
rb_gvar_get(ID id)
{
struct rb_global_entry *entry = rb_global_entry(id);
struct rb_global_variable *var = entry->var;
return (*var->getter)(entry->id, var->data);
}
VALUE
rb_gv_get(const char *name)
{
ID id = find_global_id(name);
if (!id) {
rb_warning("global variable '%s' not initialized", name);
return Qnil;
}
return rb_gvar_get(id);
}
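/* Illustrative sketch (not compiled): setting and reading a global by name
 * from C. `example_bump_counter` and "$example_counter" are hypothetical. */
#if 0
static VALUE
example_bump_counter(void)
{
    VALUE n = rb_gv_get("$example_counter"); /* nil + warning if never set */
    if (NIL_P(n)) n = INT2FIX(0);
    return rb_gv_set("$example_counter", INT2FIX(FIX2INT(n) + 1));
}
#endif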
VALUE
rb_gvar_defined(ID id)
{
struct rb_global_entry *entry = rb_global_entry(id);
return RBOOL(entry->var->getter != rb_gvar_undef_getter);
}
rb_gvar_getter_t *
rb_gvar_getter_function_of(ID id)
{
const struct rb_global_entry *entry = rb_global_entry(id);
return entry->var->getter;
}
rb_gvar_setter_t *
rb_gvar_setter_function_of(ID id)
{
const struct rb_global_entry *entry = rb_global_entry(id);
return entry->var->setter;
}
static enum rb_id_table_iterator_result
gvar_i(ID key, VALUE val, void *a)
{
VALUE ary = (VALUE)a;
rb_ary_push(ary, ID2SYM(key));
return ID_TABLE_CONTINUE;
}
VALUE
rb_f_global_variables(void)
{
VALUE ary = rb_ary_new();
VALUE sym, backref = rb_backref_get();
if (!rb_ractor_main_p()) {
rb_raise(rb_eRactorIsolationError, "can not access global variables from non-main Ractors");
}
rb_id_table_foreach(rb_global_tbl, gvar_i, (void *)ary);
if (!NIL_P(backref)) {
char buf[2];
int i, nmatch = rb_match_count(backref);
buf[0] = '$';
for (i = 1; i <= nmatch; ++i) {
if (!RTEST(rb_reg_nth_defined(i, backref))) continue;
if (i < 10) {
/* probably reused, make static ID */
buf[1] = (char)(i + '0');
sym = ID2SYM(rb_intern2(buf, 2));
}
else {
/* dynamic symbol */
sym = rb_str_intern(rb_sprintf("$%d", i));
}
rb_ary_push(ary, sym);
}
}
return ary;
}
void
rb_alias_variable(ID name1, ID name2)
{
struct rb_global_entry *entry1, *entry2;
VALUE data1;
struct rb_id_table *gtbl = rb_global_tbl;
if (!rb_ractor_main_p()) {
rb_raise(rb_eRactorIsolationError, "can not access global variables from non-main Ractors");
}
entry2 = rb_global_entry(name2);
if (!rb_id_table_lookup(gtbl, name1, &data1)) {
entry1 = ALLOC(struct rb_global_entry);
entry1->id = name1;
rb_id_table_insert(gtbl, name1, (VALUE)entry1);
}
else if ((entry1 = (struct rb_global_entry *)data1)->var != entry2->var) {
struct rb_global_variable *var = entry1->var;
if (var->block_trace) {
rb_raise(rb_eRuntimeError, "can't alias in tracer");
}
var->counter--;
if (var->counter == 0) {
struct trace_var *trace = var->trace;
while (trace) {
struct trace_var *next = trace->next;
xfree(trace);
trace = next;
}
xfree(var);
}
}
else {
return;
}
entry2->var->counter++;
entry1->var = entry2->var;
}
static void
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(ID id)
{
if (UNLIKELY(!rb_ractor_main_p())) {
if (rb_is_instance_id(id)) { // check only normal ivars
rb_raise(rb_eRactorIsolationError, "can not set instance variables of classes/modules by non-main Ractors");
}
}
}
#define CVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR() \
if (UNLIKELY(!rb_ractor_main_p())) { \
rb_raise(rb_eRactorIsolationError, "can not access class variables from non-main Ractors"); \
}
static inline struct st_table *
generic_ivtbl(VALUE obj, ID id, bool force_check_ractor)
{
ASSERT_vm_locking();
if ((force_check_ractor || LIKELY(rb_is_instance_id(id)) /* not internal ID */ ) &&
!RB_OBJ_FROZEN_RAW(obj) &&
UNLIKELY(!rb_ractor_main_p()) &&
UNLIKELY(rb_ractor_shareable_p(obj))) {
rb_raise(rb_eRactorIsolationError, "can not access instance variables of shareable objects from non-main Ractors");
}
return generic_iv_tbl_;
}
static inline struct st_table *
generic_ivtbl_no_ractor_check(VALUE obj)
{
return generic_ivtbl(obj, 0, false);
}
int
rb_gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl)
{
RUBY_ASSERT(!RB_TYPE_P(obj, T_ICLASS));
st_data_t data;
int r = 0;
RB_VM_LOCK_ENTER();
{
if (st_lookup(generic_ivtbl(obj, id, false), (st_data_t)obj, &data)) {
*ivtbl = (struct gen_ivtbl *)data;
r = 1;
}
}
RB_VM_LOCK_LEAVE();
return r;
}
int
rb_ivar_generic_ivtbl_lookup(VALUE obj, struct gen_ivtbl **ivtbl)
{
return rb_gen_ivtbl_get(obj, 0, ivtbl);
}
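/* A gen_ivtbl embeds its ivptr array, so its allocation size is the header
 * up to as.shape.ivptr plus n VALUE slots. */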
static size_t
gen_ivtbl_bytes(size_t n)
{
return offsetof(struct gen_ivtbl, as.shape.ivptr) + n * sizeof(VALUE);
}
static struct gen_ivtbl *
gen_ivtbl_resize(struct gen_ivtbl *old, uint32_t n)
{
RUBY_ASSERT(n > 0);
uint32_t len = old ? old->as.shape.numiv : 0;
struct gen_ivtbl *ivtbl = xrealloc(old, gen_ivtbl_bytes(n));
ivtbl->as.shape.numiv = n;
for (; len < n; len++) {
ivtbl->as.shape.ivptr[len] = Qundef;
}
return ivtbl;
}
void
rb_mark_generic_ivar(VALUE obj)
{
st_data_t data;
if (st_lookup(generic_ivtbl_no_ractor_check(obj), (st_data_t)obj, &data)) {
struct gen_ivtbl *ivtbl = (struct gen_ivtbl *)data;
if (rb_shape_obj_too_complex(obj)) {
rb_mark_tbl_no_pin(ivtbl->as.complex.table);
}
else {
for (uint32_t i = 0; i < ivtbl->as.shape.numiv; i++) {
rb_gc_mark_movable(ivtbl->as.shape.ivptr[i]);
}
}
}
}
void
rb_ref_update_generic_ivar(VALUE obj)
{
struct gen_ivtbl *ivtbl;
if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
if (rb_shape_obj_too_complex(obj)) {
rb_gc_ref_update_table_values_only(ivtbl->as.complex.table);
}
else {
for (uint32_t i = 0; i < ivtbl->as.shape.numiv; i++) {
ivtbl->as.shape.ivptr[i] = rb_gc_location(ivtbl->as.shape.ivptr[i]);
}
}
}
}
void
rb_mv_generic_ivar(VALUE rsrc, VALUE dst)
{
st_data_t key = (st_data_t)rsrc;
st_data_t ivtbl;
if (st_delete(generic_ivtbl_no_ractor_check(rsrc), &key, &ivtbl))
st_insert(generic_ivtbl_no_ractor_check(dst), (st_data_t)dst, ivtbl);
}
void
rb_free_generic_ivar(VALUE obj)
{
st_data_t key = (st_data_t)obj, value;
bool too_complex = rb_shape_obj_too_complex(obj);
if (st_delete(generic_ivtbl_no_ractor_check(obj), &key, &value)) {
struct gen_ivtbl *ivtbl = (struct gen_ivtbl *)value;
if (UNLIKELY(too_complex)) {
st_free_table(ivtbl->as.complex.table);
}
xfree(ivtbl);
}
}
size_t
rb_generic_ivar_memsize(VALUE obj)
{
struct gen_ivtbl *ivtbl;
if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
if (rb_shape_obj_too_complex(obj)) {
return sizeof(struct gen_ivtbl) + st_memsize(ivtbl->as.complex.table);
}
else {
return gen_ivtbl_bytes(ivtbl->as.shape.numiv);
}
}
return 0;
}
#if !SHAPE_IN_BASIC_FLAGS
shape_id_t
rb_generic_shape_id(VALUE obj)
{
struct gen_ivtbl *ivtbl = 0;
shape_id_t shape_id = 0;
RB_VM_LOCK_ENTER();
{
st_table* global_iv_table = generic_ivtbl(obj, 0, false);
if (global_iv_table && st_lookup(global_iv_table, obj, (st_data_t *)&ivtbl)) {
shape_id = ivtbl->shape_id;
}
else if (OBJ_FROZEN(obj)) {
shape_id = SPECIAL_CONST_SHAPE_ID;
}
}
RB_VM_LOCK_LEAVE();
return shape_id;
}
#endif
static size_t
gen_ivtbl_count(VALUE obj, const struct gen_ivtbl *ivtbl)
{
uint32_t i;
size_t n = 0;
if (rb_shape_obj_too_complex(obj)) {
n = st_table_size(ivtbl->as.complex.table);
}
else {
for (i = 0; i < ivtbl->as.shape.numiv; i++) {
if (!UNDEF_P(ivtbl->as.shape.ivptr[i])) {
n++;
}
}
}
return n;
}
VALUE
rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
{
if (SPECIAL_CONST_P(obj)) return undef;
shape_id_t shape_id;
VALUE * ivar_list;
rb_shape_t * shape;
#if SHAPE_IN_BASIC_FLAGS
shape_id = RBASIC_SHAPE_ID(obj);
#endif
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
case T_MODULE:
{
bool found = false;
VALUE val;
RB_VM_LOCK_ENTER();
{
#if !SHAPE_IN_BASIC_FLAGS
shape_id = RCLASS_SHAPE_ID(obj);
#endif
if (rb_shape_obj_too_complex(obj)) {
st_table * iv_table = RCLASS_IV_HASH(obj);
if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) {
found = true;
}
else {
val = undef;
}
}
else {
attr_index_t index = 0;
shape = rb_shape_get_shape_by_id(shape_id);
found = rb_shape_get_iv_index(shape, id, &index);
if (found) {
ivar_list = RCLASS_IVPTR(obj);
RUBY_ASSERT(ivar_list);
val = ivar_list[index];
}
else {
val = undef;
}
}
}
RB_VM_LOCK_LEAVE();
if (found &&
rb_is_instance_id(id) &&
UNLIKELY(!rb_ractor_main_p()) &&
!rb_ractor_shareable_p(val)) {
rb_raise(rb_eRactorIsolationError,
"can not get unshareable values from instance variables of classes/modules from non-main Ractors");
}
return val;
}
case T_OBJECT:
{
#if !SHAPE_IN_BASIC_FLAGS
shape_id = ROBJECT_SHAPE_ID(obj);
#endif
if (rb_shape_obj_too_complex(obj)) {
st_table * iv_table = ROBJECT_IV_HASH(obj);
VALUE val;
if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) {
return val;
}
else {
return undef;
}
}
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
ivar_list = ROBJECT_IVPTR(obj);
break;
}
default:
if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
rb_gen_ivtbl_get(obj, id, &ivtbl);
if (rb_shape_obj_too_complex(obj)) {
VALUE val;
if (rb_st_lookup(ivtbl->as.complex.table, (st_data_t)id, (st_data_t *)&val)) {
return val;
}
else {
return undef;
}
}
#if !SHAPE_IN_BASIC_FLAGS
shape_id = ivtbl->shape_id;
#endif
ivar_list = ivtbl->as.shape.ivptr;
}
else {
return undef;
}
break;
}
attr_index_t index = 0;
shape = rb_shape_get_shape_by_id(shape_id);
if (rb_shape_get_iv_index(shape, id, &index)) {
return ivar_list[index];
}
return undef;
}
VALUE
rb_ivar_get(VALUE obj, ID id)
{
VALUE iv = rb_ivar_lookup(obj, id, Qnil);
RB_DEBUG_COUNTER_INC(ivar_get_base);
return iv;
}
VALUE
rb_attr_get(VALUE obj, ID id)
{
return rb_ivar_lookup(obj, id, Qnil);
}
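/* Illustrative sketch (not compiled): reading and writing instance variables
 * from C. `example_name`, `obj` and the "@name" ivar are hypothetical. */
#if 0
static VALUE
example_name(VALUE obj)
{
    rb_ivar_set(obj, rb_intern("@name"), rb_str_new_cstr("example"));
    VALUE name    = rb_ivar_get(obj, rb_intern("@name"));    /* => "example" */
    VALUE missing = rb_attr_get(obj, rb_intern("@missing")); /* => Qnil      */
    return NIL_P(missing) ? name : missing;
}
#endif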
static VALUE
rb_ivar_delete(VALUE obj, ID id, VALUE undef)
{
rb_check_frozen(obj);
VALUE val = undef;
rb_shape_t *shape = rb_shape_get_shape(obj);
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
}
if (!rb_shape_transition_shape_remove_ivar(obj, id, shape, &val)) {
if (!rb_shape_obj_too_complex(obj)) {
rb_evict_ivars_to_hash(obj);
}
st_table *table = NULL;
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
case T_MODULE:
table = RCLASS_IV_HASH(obj);
break;
case T_OBJECT:
table = ROBJECT_IV_HASH(obj);
break;
default: {
struct gen_ivtbl *ivtbl;
if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
table = ivtbl->as.complex.table;
}
break;
}
}
if (table) {
if (!st_delete(table, (st_data_t *)&id, (st_data_t *)&val)) {
val = undef;
}
}
}
return val;
}
VALUE
rb_attr_delete(VALUE obj, ID id)
{
return rb_ivar_delete(obj, id, Qnil);
}
void
rb_obj_convert_to_too_complex(VALUE obj, st_table *table)
{
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
VALUE *old_ivptr = NULL;
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
old_ivptr = ROBJECT_IVPTR(obj);
}
rb_shape_set_shape_id(obj, OBJ_TOO_COMPLEX_SHAPE_ID);
ROBJECT_SET_IV_HASH(obj, table);
break;
case T_CLASS:
case T_MODULE:
old_ivptr = RCLASS_IVPTR(obj);
rb_shape_set_shape_id(obj, OBJ_TOO_COMPLEX_SHAPE_ID);
RCLASS_SET_IV_HASH(obj, table);
break;
default:
RB_VM_LOCK_ENTER();
{
struct st_table *gen_ivs = generic_ivtbl_no_ractor_check(obj);
struct gen_ivtbl *old_ivtbl = NULL;
st_lookup(gen_ivs, (st_data_t)obj, (st_data_t *)&old_ivtbl);
if (old_ivtbl) {
/* We need to modify old_ivtbl to have the too complex shape
* and hold the table because the xmalloc could trigger a GC
* compaction. We want the table to be updated rather than
* the original ivptr. */
#if SHAPE_IN_BASIC_FLAGS
rb_shape_set_shape_id(obj, OBJ_TOO_COMPLEX_SHAPE_ID);
#else
old_ivtbl->shape_id = OBJ_TOO_COMPLEX_SHAPE_ID;
#endif
old_ivtbl->as.complex.table = table;
old_ivptr = (VALUE *)old_ivtbl;
}
struct gen_ivtbl *ivtbl = xmalloc(sizeof(struct gen_ivtbl));
ivtbl->as.complex.table = table;
st_insert(gen_ivs, (st_data_t)obj, (st_data_t)ivtbl);
#if SHAPE_IN_BASIC_FLAGS
rb_shape_set_shape_id(obj, OBJ_TOO_COMPLEX_SHAPE_ID);
#else
ivtbl->shape_id = OBJ_TOO_COMPLEX_SHAPE_ID;
#endif
}
RB_VM_LOCK_LEAVE();
}
xfree(old_ivptr);
}
void
rb_evict_ivars_to_hash(VALUE obj)
{
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
st_table *table = st_init_numtable_with_size(rb_ivar_count(obj));
// Evacuate all previous values from the shape-based storage into the st_table
rb_obj_copy_ivs_to_hash_table(obj, table);
rb_obj_convert_to_too_complex(obj, table);
RUBY_ASSERT(rb_shape_obj_too_complex(obj));
}
struct general_ivar_set_result {
attr_index_t index;
bool existing;
};
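/* Shared skeleton for setting an ivar. The function-pointer arguments let
 * each caller supply how its storage exposes the ivar buffer, resizes it,
 * records the new shape, and falls back to an st_table once the object
 * becomes "too complex". */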
static struct general_ivar_set_result
general_ivar_set(VALUE obj, ID id, VALUE val, void *data,
VALUE *(*shape_ivptr_func)(VALUE, void *),
void (*shape_resize_ivptr_func)(VALUE, attr_index_t, attr_index_t, void *),
void (*set_shape_func)(VALUE, rb_shape_t *, void *),
void (*transition_too_complex_func)(VALUE, void *),
st_table *(*too_complex_table_func)(VALUE, void *))
{
struct general_ivar_set_result result = {
.index = 0,
.existing = true
};
rb_shape_t *current_shape = rb_shape_get_shape(obj);
if (UNLIKELY(current_shape->type == SHAPE_OBJ_TOO_COMPLEX)) {
goto too_complex;
}
attr_index_t index;
if (!rb_shape_get_iv_index(current_shape, id, &index)) {
result.existing = false;
index = current_shape->next_iv_index;
if (index >= MAX_IVARS) {
rb_raise(rb_eArgError, "too many instance variables");
}
rb_shape_t *next_shape = rb_shape_get_next(current_shape, obj, id);
if (UNLIKELY(next_shape->type == SHAPE_OBJ_TOO_COMPLEX)) {
transition_too_complex_func(obj, data);
goto too_complex;
}
else if (UNLIKELY(next_shape->capacity != current_shape->capacity)) {
RUBY_ASSERT(next_shape->capacity > current_shape->capacity);
shape_resize_ivptr_func(obj, current_shape->capacity, next_shape->capacity, data);
}
RUBY_ASSERT(next_shape->type == SHAPE_IVAR);
RUBY_ASSERT(index == (next_shape->next_iv_index - 1));
set_shape_func(obj, next_shape, data);
}
VALUE *table = shape_ivptr_func(obj, data);
RB_OBJ_WRITE(obj, &table[index], val);
result.index = index;
return result;
too_complex:
{
RUBY_ASSERT(rb_shape_obj_too_complex(obj));
st_table *table = too_complex_table_func(obj, data);
result.existing = st_insert(table, (st_data_t)id, (st_data_t)val);
result.index = 0;
RB_OBJ_WRITTEN(obj, Qundef, val);
}
return result;
}
struct gen_ivar_lookup_ensure_size {
VALUE obj;
ID id;
struct gen_ivtbl *ivtbl;
rb_shape_t *shape;
bool resize;
};
static int
generic_ivar_lookup_ensure_size(st_data_t *k, st_data_t *v, st_data_t u, int existing)
{
ASSERT_vm_locking();
struct gen_ivar_lookup_ensure_size *ivar_lookup = (struct gen_ivar_lookup_ensure_size *)u;
struct gen_ivtbl *ivtbl = existing ? (struct gen_ivtbl *)*v : NULL;
if (!existing || ivar_lookup->resize) {
if (existing) {
RUBY_ASSERT(ivar_lookup->shape->type == SHAPE_IVAR);
RUBY_ASSERT(rb_shape_get_shape_by_id(ivar_lookup->shape->parent_id)->capacity < ivar_lookup->shape->capacity);
}
else {
FL_SET_RAW((VALUE)*k, FL_EXIVAR);
}
ivtbl = gen_ivtbl_resize(ivtbl, ivar_lookup->shape->capacity);
*v = (st_data_t)ivtbl;
}
RUBY_ASSERT(FL_TEST((VALUE)*k, FL_EXIVAR));
ivar_lookup->ivtbl = ivtbl;
if (ivar_lookup->shape) {
#if SHAPE_IN_BASIC_FLAGS
rb_shape_set_shape(ivar_lookup->obj, ivar_lookup->shape);
#else
ivtbl->shape_id = rb_shape_id(ivar_lookup->shape);
#endif
}
return ST_CONTINUE;
}
static VALUE *
generic_ivar_set_shape_ivptr(VALUE obj, void *data)
{
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
struct gen_ivar_lookup_ensure_size *ivar_lookup = data;
RB_VM_LOCK_ENTER();
{
st_update(generic_ivtbl(obj, ivar_lookup->id, false), (st_data_t)obj, generic_ivar_lookup_ensure_size, (st_data_t)ivar_lookup);
}
RB_VM_LOCK_LEAVE();
FL_SET_RAW(obj, FL_EXIVAR);
return ivar_lookup->ivtbl->as.shape.ivptr;
}
static void
generic_ivar_set_shape_resize_ivptr(VALUE obj, attr_index_t _old_capa, attr_index_t new_capa, void *data)
{
struct gen_ivar_lookup_ensure_size *ivar_lookup = data;
ivar_lookup->resize = true;
}
static void
generic_ivar_set_set_shape(VALUE obj, rb_shape_t *shape, void *data)
{
struct gen_ivar_lookup_ensure_size *ivar_lookup = data;
ivar_lookup->shape = shape;
}
static void
generic_ivar_set_transition_too_complex(VALUE obj, void *_data)
{
rb_evict_ivars_to_hash(obj);
FL_SET_RAW(obj, FL_EXIVAR);
}
static st_table *
generic_ivar_set_too_complex_table(VALUE obj, void *data)
{
struct gen_ivar_lookup_ensure_size *ivar_lookup = data;
struct gen_ivtbl *ivtbl;
if (!rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
ivtbl = xmalloc(sizeof(struct gen_ivtbl));
#if !SHAPE_IN_BASIC_FLAGS
ivtbl->shape_id = SHAPE_OBJ_TOO_COMPLEX;
#endif
ivtbl->as.complex.table = st_init_numtable_with_size(1);
RB_VM_LOCK_ENTER();
{
st_insert(generic_ivtbl(obj, ivar_lookup->id, false), (st_data_t)obj, (st_data_t)ivtbl);
}
RB_VM_LOCK_LEAVE();
FL_SET_RAW(obj, FL_EXIVAR);
}
RUBY_ASSERT(rb_shape_obj_too_complex(obj));
return ivtbl->as.complex.table;
}
static void
generic_ivar_set(VALUE obj, ID id, VALUE val)
{
struct gen_ivar_lookup_ensure_size ivar_lookup = {
.obj = obj,
.id = id,
.resize = false,
.shape = NULL,
};
general_ivar_set(obj, id, val, &ivar_lookup,
generic_ivar_set_shape_ivptr,
generic_ivar_set_shape_resize_ivptr,
generic_ivar_set_set_shape,
generic_ivar_set_transition_too_complex,
generic_ivar_set_too_complex_table);
}
void
rb_ensure_iv_list_size(VALUE obj, uint32_t current_capacity, uint32_t new_capacity)
{
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
if (RBASIC(obj)->flags & ROBJECT_EMBED) {
VALUE *ptr = ROBJECT_IVPTR(obj);
VALUE *newptr = ALLOC_N(VALUE, new_capacity);
MEMCPY(newptr, ptr, VALUE, current_capacity);
RB_FL_UNSET_RAW(obj, ROBJECT_EMBED);
ROBJECT(obj)->as.heap.ivptr = newptr;
}
else {
REALLOC_N(ROBJECT(obj)->as.heap.ivptr, VALUE, new_capacity);
}
}
static int
rb_obj_copy_ivs_to_hash_table_i(ID key, VALUE val, st_data_t arg)
{
RUBY_ASSERT(!st_lookup((st_table *)arg, (st_data_t)key, NULL));
st_add_direct((st_table *)arg, (st_data_t)key, (st_data_t)val);
return ST_CONTINUE;
}
void
rb_obj_copy_ivs_to_hash_table(VALUE obj, st_table *table)
{
rb_ivar_foreach(obj, rb_obj_copy_ivs_to_hash_table_i, (st_data_t)table);
}
static VALUE *
obj_ivar_set_shape_ivptr(VALUE obj, void *_data)
{
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
return ROBJECT_IVPTR(obj);
}
static void
obj_ivar_set_shape_resize_ivptr(VALUE obj, attr_index_t old_capa, attr_index_t new_capa, void *_data)
{
rb_ensure_iv_list_size(obj, old_capa, new_capa);
}
static void
obj_ivar_set_set_shape(VALUE obj, rb_shape_t *shape, void *_data)
{
rb_shape_set_shape(obj, shape);
}
static void
obj_ivar_set_transition_too_complex(VALUE obj, void *_data)
{
rb_evict_ivars_to_hash(obj);
}
static st_table *
obj_ivar_set_too_complex_table(VALUE obj, void *_data)
{
RUBY_ASSERT(rb_shape_obj_too_complex(obj));
return ROBJECT_IV_HASH(obj);
}
attr_index_t
rb_obj_ivar_set(VALUE obj, ID id, VALUE val)
{
return general_ivar_set(obj, id, val, NULL,
obj_ivar_set_shape_ivptr,
obj_ivar_set_shape_resize_ivptr,
obj_ivar_set_set_shape,
obj_ivar_set_transition_too_complex,
obj_ivar_set_too_complex_table).index;
}
/* Set the instance variable +val+ on object +obj+ at ivar name +id+.
* This function only works with T_OBJECT objects, so make sure
* +obj+ is of type T_OBJECT before using this function.
*/
VALUE
rb_vm_set_ivar_id(VALUE obj, ID id, VALUE val)
{
rb_check_frozen(obj);
rb_obj_ivar_set(obj, id, val);
return val;
}
bool
rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id)
{
if (rb_shape_get_shape_id(obj) == shape_id) {
return false;
}
#if SHAPE_IN_BASIC_FLAGS
RBASIC_SET_SHAPE_ID(obj, shape_id);
#else
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
ROBJECT_SET_SHAPE_ID(obj, shape_id);
break;
case T_CLASS:
case T_MODULE:
RCLASS_SET_SHAPE_ID(obj, shape_id);
break;
default:
if (shape_id != SPECIAL_CONST_SHAPE_ID) {
struct gen_ivtbl *ivtbl = 0;
RB_VM_LOCK_ENTER();
{
st_table* global_iv_table = generic_ivtbl(obj, 0, false);
if (st_lookup(global_iv_table, obj, (st_data_t *)&ivtbl)) {
ivtbl->shape_id = shape_id;
}
else {
rb_bug("Expected shape_id entry in global iv table");
}
}
RB_VM_LOCK_LEAVE();
}
}
#endif
return true;
}
void rb_obj_freeze_inline(VALUE x)
{
if (RB_FL_ABLE(x)) {
RB_FL_SET_RAW(x, RUBY_FL_FREEZE);
if (TYPE(x) == T_STRING) {
RB_FL_UNSET_RAW(x, FL_USER3); // STR_CHILLED
}
rb_shape_t * next_shape = rb_shape_transition_shape_frozen(x);
// If we're transitioning from "not complex" to "too complex"
// then evict ivars. This can happen if we run out of shapes
if (!rb_shape_obj_too_complex(x) && next_shape->type == SHAPE_OBJ_TOO_COMPLEX) {
rb_evict_ivars_to_hash(x);
}
rb_shape_set_shape(x, next_shape);
if (RBASIC_CLASS(x)) {
rb_freeze_singleton_class(x);
}
}
}
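/*
 * Illustrative sketch only (hypothetical helper): freezing through this fast
 * path both sets RUBY_FL_FREEZE and transitions the object to its frozen
 * shape, so a subsequent rb_check_frozen() on the same object raises.
 *
 *   static void
 *   example_freeze(VALUE obj)
 *   {
 *       rb_obj_freeze_inline(obj);
 *       // rb_check_frozen(obj) would now raise a FrozenError
 *   }
 */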
static void
ivar_set(VALUE obj, ID id, VALUE val)
{
RB_DEBUG_COUNTER_INC(ivar_set_base);
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
{
rb_obj_ivar_set(obj, id, val);
break;
}
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
rb_class_ivar_set(obj, id, val);
break;
default:
generic_ivar_set(obj, id, val);
break;
}
}
VALUE
rb_ivar_set(VALUE obj, ID id, VALUE val)
{
rb_check_frozen(obj);
ivar_set(obj, id, val);
return val;
}
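/*
 * Usage sketch (illustrative only; the helper name is hypothetical):
 * rb_ivar_set dispatches on the receiver's type, so unlike rb_vm_set_ivar_id
 * above it can be used with any non-frozen object, not just T_OBJECT.
 *
 *   static void
 *   example_tag(VALUE any_obj)
 *   {
 *       rb_ivar_set(any_obj, rb_intern("@tag"), rb_str_new_cstr("seen"));
 *   }
 */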
void
rb_ivar_set_internal(VALUE obj, ID id, VALUE val)
{
// should be internal instance variable name (no @ prefix)
VM_ASSERT(!rb_is_instance_id(id));
ivar_set(obj, id, val);
}
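/*
 * Illustrative sketch only: internal callers store VM-private state under an
 * ID that is not a valid instance variable name (no leading "@"), so it is
 * skipped by the rb_is_instance_id filter used for #instance_variables.
 * The key name below is hypothetical.
 *
 *   rb_ivar_set_internal(obj, rb_intern("some_internal_state"), Qtrue);
 */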
VALUE
rb_ivar_defined(VALUE obj, ID id)
{
attr_index_t index;
if (SPECIAL_CONST_P(obj)) return Qfalse;
if (rb_shape_obj_too_complex(obj)) {
VALUE idx;
st_table *table = NULL;
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
case T_MODULE:
table = (st_table *)RCLASS_IVPTR(obj);
break;
case T_OBJECT:
table = ROBJECT_IV_HASH(obj);
break;
default: {
struct gen_ivtbl *ivtbl;
if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
table = ivtbl->as.complex.table;
}
break;
}
}
if (!table || !rb_st_lookup(table, id, &idx)) {
return Qfalse;
}
return Qtrue;
}
else {
return RBOOL(rb_shape_get_iv_index(rb_shape_get_shape(obj), id, &index));
}
}
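/*
 * Usage sketch (illustrative only):
 *
 *   if (RTEST(rb_ivar_defined(obj, rb_intern("@name")))) {
 *       // @name is currently set on obj
 *   }
 */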
typedef int rb_ivar_foreach_callback_func(ID key, VALUE val, st_data_t arg);
st_data_t rb_st_nth_key(st_table *tab, st_index_t index);
struct iv_itr_data {
VALUE obj;
struct gen_ivtbl * ivtbl;
st_data_t arg;
rb_ivar_foreach_callback_func *func;
};
/*
 * Returns true if iteration should stop, based on the result of +callback+.
*/
static bool
iterate_over_shapes_with_callback(rb_shape_t *shape, rb_ivar_foreach_callback_func *callback, struct iv_itr_data * itr_data)
{
switch ((enum shape_type)shape->type) {
case SHAPE_ROOT:
case SHAPE_T_OBJECT:
return false;
case SHAPE_IVAR:
ASSUME(callback);
if (iterate_over_shapes_with_callback(rb_shape_get_parent(shape), callback, itr_data))
return true;
VALUE * iv_list;
switch (BUILTIN_TYPE(itr_data->obj)) {
case T_OBJECT:
RUBY_ASSERT(!rb_shape_obj_too_complex(itr_data->obj));
iv_list = ROBJECT_IVPTR(itr_data->obj);
break;
case T_CLASS:
case T_MODULE:
iv_list = RCLASS_IVPTR(itr_data->obj);
break;
default:
iv_list = itr_data->ivtbl->as.shape.ivptr;
break;
}
VALUE val = iv_list[shape->next_iv_index - 1];
if (!UNDEF_P(val)) {
switch (callback(shape->edge_name, val, itr_data->arg)) {
case ST_CHECK:
case ST_CONTINUE:
break;
case ST_STOP:
return true;
default:
rb_bug("unreachable");
}
}
return false;
case SHAPE_FROZEN:
return iterate_over_shapes_with_callback(rb_shape_get_parent(shape), callback, itr_data);
case SHAPE_OBJ_TOO_COMPLEX:
default:
rb_bug("Unreachable");
}
}
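/*
 * Note on ordering (descriptive only): for SHAPE_IVAR nodes the parent shape
 * is visited before the node itself, so +callback+ sees instance variables
 * oldest-first, i.e. in the order in which they were first set on the object.
 */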
static int
each_hash_iv(st_data_t id, st_data_t val, st_data_t data)
{
struct iv_itr_data * itr_data = (struct iv_itr_data *)data;
rb_ivar_foreach_callback_func *callback = itr_data->func;
return callback((ID)id, (VALUE)val, itr_data->arg);
}
static void
obj_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
rb_shape_t* shape = rb_shape_get_shape(obj);
struct iv_itr_data itr_data;
itr_data.obj = obj;
itr_data.arg = arg;
itr_data.func = func;
if (rb_shape_obj_too_complex(obj)) {
rb_st_foreach(ROBJECT_IV_HASH(obj), each_hash_iv, (st_data_t)&itr_data);
}
else {
iterate_over_shapes_with_callback(shape, func, &itr_data);
}
}
static void
gen_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
rb_shape_t *shape = rb_shape_get_shape(obj);
struct gen_ivtbl *ivtbl;
if (!rb_gen_ivtbl_get(obj, 0, &ivtbl)) return;
struct iv_itr_data itr_data;
itr_data.obj = obj;
itr_data.ivtbl = ivtbl;
itr_data.arg = arg;
itr_data.func = func;
if (rb_shape_obj_too_complex(obj)) {
rb_st_foreach(ivtbl->as.complex.table, each_hash_iv, (st_data_t)&itr_data);
}
else {
iterate_over_shapes_with_callback(shape, func, &itr_data);
}
}
static void
class_ivar_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE));
rb_shape_t* shape = rb_shape_get_shape(obj);
struct iv_itr_data itr_data;
itr_data.obj = obj;
itr_data.arg = arg;
itr_data.func = func;
if (rb_shape_obj_too_complex(obj)) {
rb_st_foreach(RCLASS_IV_HASH(obj), each_hash_iv, (st_data_t)&itr_data);
}
else {
iterate_over_shapes_with_callback(shape, func, &itr_data);
}
}
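/*
 * Descriptive note: obj_ivar_each, gen_ivar_each and class_ivar_each are the
 * per-type helpers behind rb_ivar_foreach below. Each one either walks the
 * object's shape chain via iterate_over_shapes_with_callback or, when the
 * object is "too complex", iterates the backing st_table directly.
 */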
void
rb_copy_generic_ivar(VALUE clone, VALUE obj)
{
struct gen_ivtbl *obj_ivtbl;
struct gen_ivtbl *new_ivtbl;
rb_check_frozen(clone);
if (!FL_TEST(obj, FL_EXIVAR)) {
goto clear;
}
if (rb_gen_ivtbl_get(obj, 0, &obj_ivtbl)) {
if (gen_ivtbl_count(obj, obj_ivtbl) == 0)
goto clear;
FL_SET(clone, FL_EXIVAR);
if (rb_shape_obj_too_complex(obj)) {
new_ivtbl = xmalloc(sizeof(struct gen_ivtbl));
#if !SHAPE_IN_BASIC_FLAGS
new_ivtbl->shape_id = SHAPE_OBJ_TOO_COMPLEX;
#endif
new_ivtbl->as.complex.table = st_copy(obj_ivtbl->as.complex.table);
}
else {
new_ivtbl = gen_ivtbl_resize(0, obj_ivtbl->as.shape.numiv);
for (uint32_t i=0; i<obj_ivtbl->as.shape.numiv; i++) {
RB_OBJ_WRITE(clone, &new_ivtbl->as.shape.ivptr[i], obj_ivtbl->as.shape.ivptr[i]);
}
}
/*
 * The clone's ivtbl may change in gen_ivar_copy due to realloc,
 * so there is no need to free it here.
*/
RB_VM_LOCK_ENTER();
{
generic_ivtbl_no_ractor_check(clone);
st_insert(generic_ivtbl_no_ractor_check(obj), (st_data_t)clone, (st_data_t)new_ivtbl);
}
RB_VM_LOCK_LEAVE();
rb_shape_t * obj_shape = rb_shape_get_shape(obj);
if (rb_shape_frozen_shape_p(obj_shape)) {
rb_shape_set_shape_id(clone, obj_shape->parent_id);
}
else {
rb_shape_set_shape(clone, obj_shape);
}
}
return;
clear:
if (FL_TEST(clone, FL_EXIVAR)) {
rb_free_generic_ivar(clone);
FL_UNSET(clone, FL_EXIVAR);
}
}
void
rb_replace_generic_ivar(VALUE clone, VALUE obj)
{
RUBY_ASSERT(FL_TEST(obj, FL_EXIVAR));
RB_VM_LOCK_ENTER();
{
st_data_t ivtbl, obj_data = (st_data_t)obj;
if (st_lookup(generic_iv_tbl_, (st_data_t)obj, &ivtbl)) {
st_insert(generic_iv_tbl_, (st_data_t)clone, ivtbl);
st_delete(generic_iv_tbl_, &obj_data, NULL);
}
else {
rb_bug("unreachable");
}
}
RB_VM_LOCK_LEAVE();
FL_SET(clone, FL_EXIVAR);
}
void
rb_ivar_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
if (SPECIAL_CONST_P(obj)) return;
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
obj_ivar_each(obj, func, arg);
break;
case T_CLASS:
case T_MODULE:
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0);
RB_VM_LOCK_ENTER();
{
class_ivar_each(obj, func, arg);
}
RB_VM_LOCK_LEAVE();
break;
default:
if (FL_TEST(obj, FL_EXIVAR)) {
gen_ivar_each(obj, func, arg);
}
break;
}
}
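/*
 * Illustrative sketch only (hypothetical callback): a callback for
 * rb_ivar_foreach receives each ivar name and value and returns an st_retval,
 * mirroring the ST_CONTINUE/ST_STOP handling used by the iterators above.
 *
 *   static int
 *   example_count_ivar(ID key, VALUE val, st_data_t arg)
 *   {
 *       int *count = (int *)arg;
 *       *count += 1;
 *       return ST_CONTINUE;
 *   }
 *
 *   // int n = 0;
 *   // rb_ivar_foreach(obj, example_count_ivar, (st_data_t)&n);
 */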
st_index_t
rb_ivar_count(VALUE obj)
{
if (SPECIAL_CONST_P(obj)) return 0;
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
return ROBJECT_IV_COUNT(obj);
case T_CLASS:
case T_MODULE:
return RCLASS_IV_COUNT(obj);
default:
if (FL_TEST(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
if (rb_gen_ivtbl_get(obj, 0, &ivtbl)) {
return gen_ivtbl_count(obj, ivtbl);
}
}
break;
}
return 0;
}
static int
ivar_i(ID key, VALUE v, st_data_t a)
{
VALUE ary = (VALUE)a;
if (rb_is_instance_id(key)) {
rb_ary_push(ary, ID2SYM(key));
}
return ST_CONTINUE;
}
/*
* call-seq:
* obj.instance_variables -> array
*
* Returns an array of instance variable names for the receiver. Note
* that simply defining an accessor does not create the corresponding
* instance variable.
*
* class Fred
* attr_accessor :a1
* def initialize
* @iv = 3
* end
* end
* Fred.new.instance_variables #=> [:@iv]
*/
VALUE
rb_obj_instance_variables(VALUE obj)
{
VALUE ary;
ary = rb_ary_new();
rb_ivar_foreach(obj, ivar_i, ary);
return ary;
}
#define rb_is_constant_id rb_is_const_id
#define rb_is_constant_name rb_is_const_name
#define id_for_var(obj, name, part, type) \
id_for_var_message(obj, name, type, "'%1$s' is not allowed as "#part" "#type" variable name")
#define id_for_var_message(obj, name, type, message) \
check_id_type(obj, &(name), rb_is_##type##_id, rb_is_##type##_name, message, strlen(message))
static ID
check_id_type(VALUE obj, VALUE *pname,
int (*valid_id_p)(ID), int (*valid_name_p)(VALUE),
const char *message, size_t message_len)
{
ID id = rb_check_id(pname);
VALUE name = *pname;
if (id ? !valid_id_p(id) : !valid_name_p(name)) {
rb_name_err_raise_str(rb_fstring_new(message, message_len),
obj, name);
}
return id;
}
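/*
 * Descriptive note: id_for_var(obj, name, an, instance) above expands to a
 * check_id_type call whose message renders as
 * "'<name>' is not allowed as an instance variable name", raising a NameError
 * when +name+ is not a valid instance variable name.
 */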
/*
* call-seq:
* obj.remove_instance_variable(symbol) -> obj
* obj.remove_instance_variable(string) -> obj
*
* Removes the named instance variable from <i>obj</i>, returning that
* variable's value. The name can be passed as a symbol or as a string.
*
* class Dummy
* attr_reader :var
* def initialize
* @var = 99
* end
* def remove
* remove_instance_variable(:@var)
* end
* end
* d = Dummy.new
* d.var #=> 99
* d.remove #=> 99
* d.var #=> nil
*/
VALUE
rb_obj_remove_instance_variable(VALUE obj, VALUE name)
{
const ID id = id_for_var(obj, name, an, instance);
// Frozen check comes here because it's expected that we raise a
// NameError (from the id_for_var check) before we raise a FrozenError
rb_check_frozen(obj);
if (id) {
VALUE val = rb_ivar_delete(obj, id, Qundef);
if (!UNDEF_P(val)) return val;
}
rb_name_err_raise("instance variable %1$s not defined",
obj, name);
UNREACHABLE_RETURN(Qnil);
}
NORETURN(static void uninitialized_constant(VALUE, VALUE));
static void
uninitialized_constant(VALUE klass, VALUE name)
{
if (klass && rb_class_real(klass) != rb_cObject)
rb_name_err_raise("uninitialized constant %2$s::%1$s",
klass, name);
else
rb_name_err_raise("uninitialized constant %1$s",
klass, name);
}
VALUE
rb_const_missing(VALUE klass, VALUE name)
{
VALUE value = rb_funcallv(klass, idConst_missing, 1, &name);
rb_vm_inc_const_missing_count();
return value;
}
/*
* call-seq:
* mod.const_missing(sym) -> obj
*
* Invoked when a reference is made to an undefined constant in
* <i>mod</i>. It is passed a symbol for the undefined constant, and
* returns a value to be used for that constant. For example, consider:
*
* def Foo.const_missing(name)
* name # return the constant name as Symbol
* end
*
 *   Foo::UNDEFINED_CONST    #=> :UNDEFINED_CONST (the symbol returned by const_missing)
*
* As the example above shows, +const_missing+ is not required to create the
* missing constant in <i>mod</i>, though that is often a side effect. The
* caller simply receives +const_missing+'s return value. If the constant is
* also defined, further lookups won't hit +const_missing+ and will return the
* value stored in the constant as usual. Otherwise, +const_missing+ will be
* invoked again.
*
* In the next example, when a reference is made to an undefined constant,
* +const_missing+ attempts to load a file whose path is the lowercase version
* of the constant name (thus class <code>Fred</code> is assumed to be in file
* <code>fred.rb</code>). If defined as a side-effect of loading the file, the
* method returns the value stored in the constant. This implements an autoload
* feature similar to Kernel#autoload and Module#autoload, though it differs in
* important ways.
*
* def Object.const_missing(name)
* @looked_for ||= {}
* str_name = name.to_s
* raise "Constant not found: #{name}" if @looked_for[str_name]
* @looked_for[str_name] = 1
* file = str_name.downcase
* require file
* const_get(name, false)
* end
*
*/
VALUE
rb_mod_const_missing(VALUE klass, VALUE name)
{
rb_execution_context_t *ec = GET_EC();
VALUE ref = ec->private_const_reference;
rb_vm_pop_cfunc_frame();
if (ref) {
ec->private_const_reference = 0;
rb_name_err_raise("private constant %2$s::%1$s referenced", ref, name);
}
uninitialized_constant(klass, name);
UNREACHABLE_RETURN(Qnil);
}
static void
autoload_table_mark(void *ptr)
{
rb_mark_tbl_no_pin((st_table *)ptr);
}
static void
autoload_table_free(void *ptr)
{
st_free_table((st_table *)ptr);
}
static size_t
autoload_table_memsize(const void *ptr)
{
const st_table *tbl = ptr;
return st_memsize(tbl);
}
static void
autoload_table_compact(void *ptr)
{
rb_gc_update_tbl_refs((st_table *)ptr);
}
static const rb_data_type_t autoload_table_type = {
"autoload_table",
{autoload_table_mark, autoload_table_free, autoload_table_memsize, autoload_table_compact,},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
#define check_autoload_table(av) \
(struct st_table *)rb_check_typeddata((av), &autoload_table_type)
static VALUE
autoload_data(VALUE mod, ID id)
{
struct st_table *tbl;
st_data_t val;
// If we are called with a non-origin ICLASS, fetch the autoload data from
// the original module.
if (RB_TYPE_P(mod, T_ICLASS)) {
if (FL_TEST_RAW(mod, RICLASS_IS_ORIGIN)) {
return 0;
}
else {
mod = RBASIC(mod)->klass;
}
}
RUBY_ASSERT(RB_TYPE_P(mod, T_CLASS) || RB_TYPE_P(mod, T_MODULE));
// Look up the instance variable table for `autoload`, then index into that table with the given constant name `id`.
VALUE tbl_value = rb_ivar_lookup(mod, autoload, Qfalse);
if (!RTEST(tbl_value) || !(tbl = check_autoload_table(tbl_value)) || !st_lookup(tbl, (st_data_t)id, &val)) {
return 0;
}
return (VALUE)val;
}
// Every autoload constant has exactly one instance of autoload_const, stored in `autoload_features`. Since multiple autoload constants can refer to the same file, every `autoload_const` refers to a de-duplicated `autoload_data`.
struct autoload_const {
// The linked list node of all constants which are loaded by the related autoload feature.
struct ccan_list_node cnode; /* <=> autoload_data.constants */
// The shared "autoload_data" if multiple constants are defined from the same feature.
VALUE autoload_data_value;
// The module we are loading a constant into.
VALUE module;
// The name of the constant we are loading.
ID name;
// The value of the constant (after it's loaded).
VALUE value;
// The constant entry flags which need to be re-applied after autoloading the feature.
rb_const_flag_t flag;
// The source file and line number that defined this constant (different from feature path).
VALUE file;
int line;
};
// Each `autoload_data` uniquely represents a specific feature which can be loaded, and a list of constants which it is able to define. We use a mutex to coordinate multiple threads trying to load the same feature.
struct autoload_data {
// The feature path to require to load this constant.
VALUE feature;
// The mutex which is protecting autoloading this feature.
VALUE mutex;
// The process fork serial number since the autoload mutex will become invalid on fork.
rb_serial_t fork_gen;
// The linked list of all constants that are going to be loaded by this autoload.
struct ccan_list_head constants; /* <=> autoload_const.cnode */
};
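/*
 * Illustrative sketch (assumed constant and feature names): two autoload
 * declarations that point at the same feature share a single autoload_data,
 * while each constant keeps its own autoload_const node on the `constants`
 * list:
 *
 *   autoload :Foo, "shared"   # autoload_const(Foo) --.
 *   autoload :Bar, "shared"   # autoload_const(Bar) --+--> autoload_data("shared")
 */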
static void
autoload_data_compact(void *ptr)
{
struct autoload_data *p = ptr;
p->feature = rb_gc_location(p->feature);
p->mutex = rb_gc_location(p->mutex);
}
static void
autoload_data_mark(void *ptr)
{
struct autoload_data *p = ptr;
rb_gc_mark_movable(p->feature);
rb_gc_mark_movable(p->mutex);
}
static void
autoload_data_free(void *ptr)
{
struct autoload_data *p = ptr;
struct autoload_const *autoload_const, *next;
ccan_list_for_each_safe(&p->constants, autoload_const, next, cnode) {
ccan_list_del_init(&autoload_const->cnode);
}
ruby_xfree(p);
}
static size_t
autoload_data_memsize(const void *ptr)
{
return sizeof(struct autoload_data);
}
static const rb_data_type_t autoload_data_type = {
"autoload_data",
{autoload_data_mark, autoload_data_free, autoload_data_memsize, autoload_data_compact},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
static void
autoload_const_compact(void *ptr)
{
struct autoload_const *ac = ptr;
ac->module = rb_gc_location(ac->module);
ac->autoload_data_value = rb_gc_location(ac->autoload_data_value);
ac->value = rb_gc_location(ac->value);
ac->file = rb_gc_location(ac->file);
}
static void
autoload_const_mark(void *ptr)
{
struct autoload_const *ac = ptr;
rb_gc_mark_movable(ac->module);
rb_gc_mark_movable(ac->autoload_data_value);
rb_gc_mark_movable(ac->value);
rb_gc_mark_movable(ac->file);
}
static size_t
autoload_const_memsize(const void *ptr)
{
return sizeof(struct autoload_const);
}
static void
autoload_const_free(void *ptr)
{
struct autoload_const *autoload_const = ptr;
ccan_list_del(&autoload_const->cnode);
ruby_xfree(ptr);
}
static const rb_data_type_t autoload_const_type = {
"autoload_const",
{autoload_const_mark, autoload_const_free, autoload_const_memsize, autoload_const_compact,},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
static struct autoload_data *
get_autoload_data(VALUE autoload_const_value, struct autoload_const **autoload_const_pointer)
{
struct autoload_const *autoload_const = rb_check_typeddata(autoload_const_value, &autoload_const_type);
VALUE autoload_data_value = autoload_const->autoload_data_value;
struct autoload_data *autoload_data = rb_check_typeddata(autoload_data_value, &autoload_data_type);
/* do not reach across stack for ->state after forking: */
if (autoload_data && autoload_data->fork_gen != GET_VM()->fork_gen) {
RB_OBJ_WRITE(autoload_data_value, &autoload_data->mutex, Qnil);
autoload_data->fork_gen = 0;
}
if (autoload_const_pointer) *autoload_const_pointer = autoload_const;
return autoload_data;
}
void
rb_autoload(VALUE module, ID name, const char *feature)
{
if (!feature || !*feature) {
rb_raise(rb_eArgError, "empty feature name");
}
rb_autoload_str(module, name, rb_fstring_cstr(feature));
}
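/*
 * Usage sketch for C extensions (module and feature names are hypothetical):
 * register Foo::Bar to be required from "foo/bar" on first reference.
 *
 *   rb_autoload(rb_define_module("Foo"), rb_intern("Bar"), "foo/bar");
 *
 * This is equivalent to calling rb_autoload_str() with the feature wrapped in
 * an fstring, as done above.
 */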
static void const_set(VALUE klass, ID id, VALUE val);
static void const_added(VALUE klass, ID const_name);
struct autoload_arguments {
VALUE module;
ID name;
VALUE feature;
};
static VALUE
autoload_feature_lookup_or_create(VALUE feature, struct autoload_data **autoload_data_pointer)
{
RUBY_ASSERT_MUTEX_OWNED(autoload_mutex);
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
VALUE autoload_data_value = rb_hash_aref(autoload_features, feature);
struct autoload_data *autoload_data;
if (NIL_P(autoload_data_value)) {
autoload_data_value = TypedData_Make_Struct(0, struct autoload_data, &autoload_data_type, autoload_data);
RB_OBJ_WRITE(autoload_data_value, &autoload_data->feature, feature);
RB_OBJ_WRITE(autoload_data_value, &autoload_data->mutex, Qnil);
ccan_list_head_init(&autoload_data->constants);
if (autoload_data_pointer) *autoload_data_pointer = autoload_data;
rb_hash_aset(autoload_features, feature, autoload_data_value);
}
else if (autoload_data_pointer) {
*autoload_data_pointer = rb_check_typeddata(autoload_data_value, &autoload_data_type);
}
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
return autoload_data_value;
}
static VALUE
autoload_table_lookup_or_create(VALUE module)
{
VALUE autoload_table_value = rb_ivar_lookup(module, autoload, Qfalse);
if (RTEST(autoload_table_value)) {
return autoload_table_value;
}
else {
autoload_table_value = TypedData_Wrap_Struct(0, &autoload_table_type, NULL);
rb_class_ivar_set(module, autoload, autoload_table_value);
RTYPEDDATA_DATA(autoload_table_value) = st_init_numtable();
return autoload_table_value;
}
}
static VALUE
autoload_synchronized(VALUE _arguments)
{
struct autoload_arguments *arguments = (struct autoload_arguments *)_arguments;
rb_const_entry_t *constant_entry = rb_const_lookup(arguments->module, arguments->name);
if (constant_entry && !UNDEF_P(constant_entry->value)) {
return Qfalse;
}
// Reset any state associated with any previous constant:
const_set(arguments->module, arguments->name, Qundef);
VALUE autoload_table_value = autoload_table_lookup_or_create(arguments->module);
struct st_table *autoload_table = check_autoload_table(autoload_table_value);
// Ensure the string is uniqued since we use an identity lookup:
VALUE feature = rb_fstring(arguments->feature);
struct autoload_data *autoload_data;
VALUE autoload_data_value = autoload_feature_lookup_or_create(feature, &autoload_data);
{
struct autoload_const *autoload_const;
VALUE autoload_const_value = TypedData_Make_Struct(0, struct autoload_const, &autoload_const_type, autoload_const);
autoload_const->module = arguments->module;
autoload_const->name = arguments->name;
autoload_const->value = Qundef;
autoload_const->flag = CONST_PUBLIC;
autoload_const->autoload_data_value = autoload_data_value;
ccan_list_add_tail(&autoload_data->constants, &autoload_const->cnode);
st_insert(autoload_table, (st_data_t)arguments->name, (st_data_t)autoload_const_value);
RB_OBJ_WRITTEN(autoload_table_value, Qundef, autoload_const_value);
}
return Qtrue;
}
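/*
 * Behavioural note (illustrative Ruby, assumed names): autoload_synchronized
 * returns Qfalse when the constant already has a real value, so a later
 * autoload declaration for it is a no-op and no const_added callback fires:
 *
 *   X = 1
 *   autoload :X, "x"   # ignored; X stays 1 and "x" is never required
 */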
void
rb_autoload_str(VALUE module, ID name, VALUE feature)
{
if (!rb_is_const_id(name)) {
rb_raise(rb_eNameError, "autoload must be constant name: %"PRIsVALUE"", QUOTE_ID(name));
}
Check_Type(feature, T_STRING);
if (!RSTRING_LEN(feature)) {
rb_raise(rb_eArgError, "empty feature name");
}
struct autoload_arguments arguments = {
.module = module,
.name = name,
.feature = feature,
};
VALUE result = rb_mutex_synchronize(autoload_mutex, autoload_synchronized, (VALUE)&arguments);
if (result == Qtrue) {
const_added(module, name);
}
}
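/*
 * Ruby-level view (illustrative, assumed names): Module#autoload ultimately
 * reaches rb_autoload_str, and const_added is only invoked when a new
 * autoload entry was actually registered:
 *
 *   module M
 *     autoload :Foo, "m/foo"   # M.const_added(:Foo) fires here
 *   end
 */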
static void
autoload_delete(VALUE module, ID name)
{
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
st_data_t load = 0, key = name;
RUBY_ASSERT(RB_TYPE_P(module, T_CLASS) || RB_TYPE_P(module, T_MODULE));
VALUE table_value = rb_ivar_lookup(module, autoload, Qfalse);
if (RTEST(table_value)) {
struct st_table *table = check_autoload_table(table_value);
st_delete(table, &key, &load);
RB_OBJ_WRITTEN(table_value, load, Qundef);
/* Qfalse can indicate already deleted */
if (load != Qfalse) {
struct autoload_const *autoload_const;
struct autoload_data *autoload_data = get_autoload_data((VALUE)load, &autoload_const);
VM_ASSERT(autoload_data);
VM_ASSERT(!ccan_list_empty(&autoload_data->constants));
/*
* we must delete here to avoid "already initialized" warnings
* with parallel autoload. Using list_del_init here so list_del
* works in autoload_const_free
*/
ccan_list_del_init(&autoload_const->cnode);
if (ccan_list_empty(&autoload_data->constants)) {
rb_hash_delete(autoload_features, autoload_data->feature);
}
// If the autoload table is empty, we can delete it.
if (table->num_entries == 0) {
rb_attr_delete(module, autoload);
}
}
}
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
}
static int
autoload_by_someone_else(struct autoload_data *ele)
{
return ele->mutex != Qnil && !rb_mutex_owned_p(ele->mutex);
}
static VALUE
check_autoload_required(VALUE mod, ID id, const char **loadingpath)
{
VALUE autoload_const_value = autoload_data(mod, id);
struct autoload_data *autoload_data;
const char *loading;
if (!autoload_const_value || !(autoload_data = get_autoload_data(autoload_const_value, 0))) {
return 0;
}
VALUE feature = autoload_data->feature;
/*
* if somebody else is autoloading, we MUST wait for them, since
* rb_provide_feature can provide a feature before autoload_const_set
* completes. We must wait until autoload_const_set finishes in
* the other thread.
*/
if (autoload_by_someone_else(autoload_data)) {
return autoload_const_value;
}
loading = RSTRING_PTR(feature);
if (!rb_feature_provided(loading, &loading)) {
return autoload_const_value;
}
if (loadingpath && loading) {
*loadingpath = loading;
return autoload_const_value;
}
return 0;
}
static struct autoload_const *autoloading_const_entry(VALUE mod, ID id);
int
rb_autoloading_value(VALUE mod, ID id, VALUE* value, rb_const_flag_t *flag)
{
struct autoload_const *ac = autoloading_const_entry(mod, id);
if (!ac) return FALSE;
if (value) {
*value = ac->value;
}
if (flag) {
*flag = ac->flag;
}
return TRUE;
}
static int
autoload_by_current(struct autoload_data *ele)
{
return ele->mutex != Qnil && rb_mutex_owned_p(ele->mutex);
}
// If there is an autoloading constant and it has been set by the current
// execution context, return it. This allows threads which are loading code to
// refer to their own autoloaded constants.
struct autoload_const *
autoloading_const_entry(VALUE mod, ID id)
{
VALUE load = autoload_data(mod, id);
struct autoload_data *ele;
struct autoload_const *ac;
// Find the autoloading state:
if (!load || !(ele = get_autoload_data(load, &ac))) {
// Couldn't be found:
return 0;
}
// Check if it's being loaded by the current thread/fiber:
if (autoload_by_current(ele)) {
if (!UNDEF_P(ac->value)) {
return ac;
}
}
return 0;
}
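/*
 * Illustrative scenario (assumed file layout): while "foo.rb" is being
 * autoloaded, the loading thread can already see the constant through this
 * path as soon as it is assigned, while other threads keep waiting on the
 * autoload mutex until loading completes:
 *
 *   # elsewhere:  autoload :Foo, "foo"
 *   # foo.rb:
 *   Foo = Object.new
 *   Foo.freeze   # this lookup resolves via autoloading_const_entry
 */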
static int
autoload_defined_p(VALUE mod, ID id)
{
rb_const_entry_t *ce = rb_const_lookup(mod, id);
// If there is no constant or the constant is not undefined (special marker for autoloading):
if (!ce || !UNDEF_P(ce->value)) {
// We are not autoloading:
return 0;
}
// Otherwise check if there is an autoload in flight right now:
return !rb_autoloading_value(mod, id, NULL, NULL);
}
static void const_tbl_update(struct autoload_const *, int);
struct autoload_load_arguments {
VALUE module;
ID name;
int flag;
VALUE mutex;
// The specific constant which triggered the autoload code to fire:
struct autoload_const *autoload_const;
// The parent autoload data which is shared between multiple constants:
struct autoload_data *autoload_data;
};
static VALUE
autoload_const_set(struct autoload_const *ac)
{
check_before_mod_set(ac->module, ac->name, ac->value, "constant");
RB_VM_LOCK_ENTER();
{
const_tbl_update(ac, true);
}
RB_VM_LOCK_LEAVE();
return 0; /* ignored */
}
static VALUE
autoload_load_needed(VALUE _arguments)
{
struct autoload_load_arguments *arguments = (struct autoload_load_arguments*)_arguments;
const char *loading = 0, *src;
if (!autoload_defined_p(arguments->module, arguments->name)) {
return Qfalse;
}
VALUE autoload_const_value = check_autoload_required(arguments->module, arguments->name, &loading);
if (!autoload_const_value) {
return Qfalse;
}
src = rb_sourcefile();
if (src && loading && strcmp(src, loading) == 0) {
return Qfalse;
}
struct autoload_const *autoload_const;
struct autoload_data *autoload_data;
if (!(autoload_data = get_autoload_data(autoload_const_value, &autoload_const))) {
return Qfalse;
}
if (NIL_P(autoload_data->mutex)) {
RB_OBJ_WRITE(autoload_const->autoload_data_value, &autoload_data->mutex, rb_mutex_new());
autoload_data->fork_gen = GET_VM()->fork_gen;
}
else if (rb_mutex_owned_p(autoload_data->mutex)) {
return Qfalse;
}
arguments->mutex = autoload_data->mutex;
arguments->autoload_const = autoload_const;
return autoload_const_value;
}
static VALUE
autoload_apply_constants(VALUE _arguments)
{
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
struct autoload_load_arguments *arguments = (struct autoload_load_arguments*)_arguments;
struct autoload_const *autoload_const = 0; // for ccan_container_off_var()
struct autoload_const *next;
// We use safe iteration here because `autoload_const_set` will eventually invoke
// `autoload_delete` which will remove the constant from the linked list. In theory, once
// the `autoload_data->constants` linked list is empty, we can remove it.
// Iterate over all constants and assign them:
ccan_list_for_each_safe(&arguments->autoload_data->constants, autoload_const, next, cnode) {
if (!UNDEF_P(autoload_const->value)) {
autoload_const_set(autoload_const);
}
}
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
return Qtrue;
}
static VALUE
autoload_feature_require(VALUE _arguments)
{
struct autoload_load_arguments *arguments = (struct autoload_load_arguments*)_arguments;
struct autoload_const *autoload_const = arguments->autoload_const;
// We save this for later use in autoload_apply_constants:
arguments->autoload_data = rb_check_typeddata(autoload_const->autoload_data_value, &autoload_data_type);
VALUE result = rb_funcall(rb_vm_top_self(), rb_intern("require"), 1, arguments->autoload_data->feature);
if (RTEST(result)) {
return rb_mutex_synchronize(autoload_mutex, autoload_apply_constants, _arguments);
}
return result;
}
static VALUE
autoload_try_load(VALUE _arguments)
{
struct autoload_load_arguments *arguments = (struct autoload_load_arguments*)_arguments;
VALUE result = autoload_feature_require(_arguments);
// After we loaded the feature, if the constant is not defined, we remove it completely:
rb_const_entry_t *ce = rb_const_lookup(arguments->module, arguments->name);
if (!ce || UNDEF_P(ce->value)) {
result = Qfalse;
rb_const_remove(arguments->module, arguments->name);
if (arguments->module == rb_cObject) {
rb_warning(
"Expected %"PRIsVALUE" to define %"PRIsVALUE" but it didn't",
arguments->autoload_data->feature,
ID2SYM(arguments->name)
);
}
else {
rb_warning(
"Expected %"PRIsVALUE" to define %"PRIsVALUE"::%"PRIsVALUE" but it didn't",
arguments->autoload_data->feature,
arguments->module,
ID2SYM(arguments->name)
);
}
}
else {
// Otherwise, it was loaded; copy the flags from the autoload constant:
ce->flag |= arguments->flag;
}
return result;
}
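/*
* A Ruby-level sketch of how this path is reached (illustrative only; `Foo`
* and "foo" are placeholder names, not part of this file):
*
*     autoload :Foo, "foo"   # registers the constant with an undefined marker
*     Foo                    # constant lookup sees the marker and calls
*                            # rb_autoload_load, which requires "foo"
*/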
VALUE
rb_autoload_load(VALUE module, ID name)
{
rb_const_entry_t *ce = rb_const_lookup(module, name);
// We bail out as early as possible without any synchronisation:
if (!ce || !UNDEF_P(ce->value)) {
return Qfalse;
}
// At this point, we assume there might be autoloading, so fail if we are not on the main Ractor:
if (UNLIKELY(!rb_ractor_main_p())) {
rb_raise(rb_eRactorUnsafeError, "require by autoload on non-main Ractor is not supported (%s)", rb_id2name(name));
}
// This state is stored on the stack and is used during the autoload process.
struct autoload_load_arguments arguments = {.module = module, .name = name, .mutex = Qnil};
// Figure out whether we can autoload the named constant:
VALUE autoload_const_value = rb_mutex_synchronize(autoload_mutex, autoload_load_needed, (VALUE)&arguments);
// This confirms whether autoloading is required or not:
if (autoload_const_value == Qfalse) return autoload_const_value;
arguments.flag = ce->flag & (CONST_DEPRECATED | CONST_VISIBILITY_MASK);
// Only one thread will enter here at a time:
VALUE result = rb_mutex_synchronize(arguments.mutex, autoload_try_load, (VALUE)&arguments);
// Without this guard, the autoload constant could be freed by another thread
// which loads multiple constants, one of which resolves to the constant this
// thread is trying to load. Protect it so that it is not freed until we are
// done with it in `autoload_try_load`:
RB_GC_GUARD(autoload_const_value);
return result;
}
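/*
* Roughly the Ruby-level behaviour these lookups support (an illustrative
* sketch; `Example` and `Foo` are placeholder names):
*
*     module Example
*       autoload :Foo, "foo"
*     end
*
*     Example.autoload?(:Foo)   #=> "foo"
*     Example.autoload?(:Bar)   #=> nil
*/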
VALUE
rb_autoload_p(VALUE mod, ID id)
{
return rb_autoload_at_p(mod, id, TRUE);
}
VALUE
rb_autoload_at_p(VALUE mod, ID id, int recur)
{
VALUE load;
struct autoload_data *ele;
while (!autoload_defined_p(mod, id)) {
if (!recur) return Qnil;
mod = RCLASS_SUPER(mod);
if (!mod) return Qnil;
}
load = check_autoload_required(mod, id, 0);
if (!load) return Qnil;
return (ele = get_autoload_data(load, 0)) ? ele->feature : Qnil;
}
void
rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id)
{
if (RB_CONST_DEPRECATED_P(ce) &&
rb_warning_category_enabled_p(RB_WARN_CATEGORY_DEPRECATED)) {
if (klass == rb_cObject) {
rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "constant ::%"PRIsVALUE" is deprecated", QUOTE_ID(id));
}
else {
rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "constant %"PRIsVALUE"::%"PRIsVALUE" is deprecated",
rb_class_name(klass), QUOTE_ID(id));
}
}
}
static VALUE
rb_const_get_0(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE c = rb_const_search(klass, id, exclude, recurse, visibility);
if (!UNDEF_P(c)) {
if (UNLIKELY(!rb_ractor_main_p())) {
if (!rb_ractor_shareable_p(c)) {
rb_raise(rb_eRactorIsolationError, "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main Ractor.", rb_class_path(klass), rb_id2name(id));
}
}
return c;
}
return rb_const_missing(klass, ID2SYM(id));
}
static VALUE
rb_const_search_from(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE value, current;
bool first_iteration = true;
for (current = klass;
RTEST(current);
current = RCLASS_SUPER(current), first_iteration = false) {
VALUE tmp;
VALUE am = 0;
rb_const_entry_t *ce;
if (!first_iteration && RCLASS_ORIGIN(current) != current) {
// This item in the super chain has an origin iclass
// that comes later in the chain. Skip this item so
// prepended modules take precedence.
continue;
}
// Do lookup in original class or module in case we are at an origin
// iclass in the chain.
tmp = current;
if (BUILTIN_TYPE(tmp) == T_ICLASS) tmp = RBASIC(tmp)->klass;
// Do the lookup. Loop in case of autoload.
while ((ce = rb_const_lookup(tmp, id))) {
if (visibility && RB_CONST_PRIVATE_P(ce)) {
GET_EC()->private_const_reference = tmp;
return Qundef;
}
rb_const_warn_if_deprecated(ce, tmp, id);
value = ce->value;
if (UNDEF_P(value)) {
struct autoload_const *ac;
if (am == tmp) break;
am = tmp;
ac = autoloading_const_entry(tmp, id);
if (ac) return ac->value;
rb_autoload_load(tmp, id);
continue;
}
if (exclude && tmp == rb_cObject) {
goto not_found;
}
return value;
}
if (!recurse) break;
}
not_found:
GET_EC()->private_const_reference = 0;
return Qundef;
}
static VALUE
rb_const_search(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE value;
if (klass == rb_cObject) exclude = FALSE;
value = rb_const_search_from(klass, id, exclude, recurse, visibility);
if (!UNDEF_P(value)) return value;
if (exclude) return value;
if (BUILTIN_TYPE(klass) != T_MODULE) return value;
/* search global const too, if klass is a module */
return rb_const_search_from(rb_cObject, id, FALSE, recurse, visibility);
}
VALUE
rb_const_get_from(VALUE klass, ID id)
{
return rb_const_get_0(klass, id, TRUE, TRUE, FALSE);
}
VALUE
rb_const_get(VALUE klass, ID id)
{
return rb_const_get_0(klass, id, FALSE, TRUE, FALSE);
}
VALUE
rb_const_get_at(VALUE klass, ID id)
{
return rb_const_get_0(klass, id, TRUE, FALSE, FALSE);
}
VALUE
rb_public_const_get_from(VALUE klass, ID id)
{
return rb_const_get_0(klass, id, TRUE, TRUE, TRUE);
}
VALUE
rb_public_const_get_at(VALUE klass, ID id)
{
return rb_const_get_0(klass, id, TRUE, FALSE, TRUE);
}
NORETURN(static void undefined_constant(VALUE mod, VALUE name));
static void
undefined_constant(VALUE mod, VALUE name)
{
rb_name_err_raise("constant %2$s::%1$s not defined",
mod, name);
}
static VALUE
rb_const_location_from(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
while (RTEST(klass)) {
rb_const_entry_t *ce;
while ((ce = rb_const_lookup(klass, id))) {
if (visibility && RB_CONST_PRIVATE_P(ce)) {
return Qnil;
}
if (exclude && klass == rb_cObject) {
goto not_found;
}
if (UNDEF_P(ce->value)) { // autoload
VALUE autoload_const_value = autoload_data(klass, id);
if (RTEST(autoload_const_value)) {
struct autoload_const *autoload_const;
struct autoload_data *autoload_data = get_autoload_data(autoload_const_value, &autoload_const);
if (!UNDEF_P(autoload_const->value) && RTEST(rb_mutex_owned_p(autoload_data->mutex))) {
return rb_assoc_new(autoload_const->file, INT2NUM(autoload_const->line));
}
}
}
if (NIL_P(ce->file)) return rb_ary_new();
return rb_assoc_new(ce->file, INT2NUM(ce->line));
}
if (!recurse) break;
klass = RCLASS_SUPER(klass);
}
not_found:
return Qnil;
}
static VALUE
rb_const_location(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE loc;
if (klass == rb_cObject) exclude = FALSE;
loc = rb_const_location_from(klass, id, exclude, recurse, visibility);
if (!NIL_P(loc)) return loc;
if (exclude) return loc;
if (BUILTIN_TYPE(klass) != T_MODULE) return loc;
/* search global const too, if klass is a module */
return rb_const_location_from(rb_cObject, id, FALSE, recurse, visibility);
}
VALUE
rb_const_source_location(VALUE klass, ID id)
{
return rb_const_location(klass, id, FALSE, TRUE, FALSE);
}
VALUE
rb_const_source_location_at(VALUE klass, ID id)
{
return rb_const_location(klass, id, TRUE, FALSE, FALSE);
}
/*
* call-seq:
* remove_const(sym) -> obj
*
* Removes the definition of the given constant, returning that
* constant's previous value. If that constant referred to
* a module, this will not change that module's name and can lead
* to confusion.
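*
* For example (an illustrative sketch; +Example+ and +ANSWER+ are
* placeholder names):
*
*    module Example
*      ANSWER = 42
*      remove_const(:ANSWER)           #=> 42
*    end
*    Example.const_defined?(:ANSWER)   #=> false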
*/
VALUE
rb_mod_remove_const(VALUE mod, VALUE name)
{
const ID id = id_for_var(mod, name, a, constant);
if (!id) {
undefined_constant(mod, name);
}
return rb_const_remove(mod, id);
}
VALUE
rb_const_remove(VALUE mod, ID id)
{
VALUE val;
rb_const_entry_t *ce;
rb_check_frozen(mod);
ce = rb_const_lookup(mod, id);
if (!ce || !rb_id_table_delete(RCLASS_CONST_TBL(mod), id)) {
if (rb_const_defined_at(mod, id)) {
rb_name_err_raise("cannot remove %2$s::%1$s", mod, ID2SYM(id));
}
undefined_constant(mod, ID2SYM(id));
}
rb_clear_constant_cache_for_id(id);
val = ce->value;
if (UNDEF_P(val)) {
autoload_delete(mod, id);
val = Qnil;
}
ruby_xfree(ce);
return val;
}
static int
cv_i_update(st_data_t *k, st_data_t *v, st_data_t a, int existing)
{
if (existing) return ST_STOP;
*v = a;
return ST_CONTINUE;
}
static enum rb_id_table_iterator_result
sv_i(ID key, VALUE v, void *a)
{
rb_const_entry_t *ce = (rb_const_entry_t *)v;
st_table *tbl = a;
if (rb_is_const_id(key)) {
st_update(tbl, (st_data_t)key, cv_i_update, (st_data_t)ce);
}
return ID_TABLE_CONTINUE;
}
static enum rb_id_table_iterator_result
rb_local_constants_i(ID const_name, VALUE const_value, void *ary)
{
if (rb_is_const_id(const_name) && !RB_CONST_PRIVATE_P((rb_const_entry_t *)const_value)) {
rb_ary_push((VALUE)ary, ID2SYM(const_name));
}
return ID_TABLE_CONTINUE;
}
static VALUE
rb_local_constants(VALUE mod)
{
struct rb_id_table *tbl = RCLASS_CONST_TBL(mod);
VALUE ary;
if (!tbl) return rb_ary_new2(0);
RB_VM_LOCK_ENTER();
{
ary = rb_ary_new2(rb_id_table_size(tbl));
rb_id_table_foreach(tbl, rb_local_constants_i, (void *)ary);
}
RB_VM_LOCK_LEAVE();
return ary;
}
void*
rb_mod_const_at(VALUE mod, void *data)
{
st_table *tbl = data;
if (!tbl) {
tbl = st_init_numtable();
}
if (RCLASS_CONST_TBL(mod)) {
RB_VM_LOCK_ENTER();
{
rb_id_table_foreach(RCLASS_CONST_TBL(mod), sv_i, tbl);
}
RB_VM_LOCK_LEAVE();
}
return tbl;
}
void*
rb_mod_const_of(VALUE mod, void *data)
{
VALUE tmp = mod;
for (;;) {
data = rb_mod_const_at(tmp, data);
tmp = RCLASS_SUPER(tmp);
if (!tmp) break;
if (tmp == rb_cObject && mod != rb_cObject) break;
}
return data;
}
static int
list_i(st_data_t key, st_data_t value, VALUE ary)
{
ID sym = (ID)key;
rb_const_entry_t *ce = (rb_const_entry_t *)value;
if (RB_CONST_PUBLIC_P(ce)) rb_ary_push(ary, ID2SYM(sym));
return ST_CONTINUE;
}
VALUE
rb_const_list(void *data)
{
st_table *tbl = data;
VALUE ary;
if (!tbl) return rb_ary_new2(0);
ary = rb_ary_new2(tbl->num_entries);
st_foreach_safe(tbl, list_i, ary);
st_free_table(tbl);
return ary;
}
/*
* call-seq:
* mod.constants(inherit=true) -> array
*
* Returns an array of the names of the constants accessible in
* <i>mod</i>. This includes the names of constants in any included
* modules (example at start of section), unless the <i>inherit</i>
* parameter is set to <code>false</code>.
*
* The implementation makes no guarantees about the order in which the
* constants are yielded.
*
* IO.constants.include?(:SYNC) #=> true
* IO.constants(false).include?(:SYNC) #=> false
*
* Also see Module#const_defined?.
*/
VALUE
rb_mod_constants(int argc, const VALUE *argv, VALUE mod)
{
bool inherit = true;
if (rb_check_arity(argc, 0, 1)) inherit = RTEST(argv[0]);
if (inherit) {
return rb_const_list(rb_mod_const_of(mod, 0));
}
else {
return rb_local_constants(mod);
}
}
static int
rb_const_defined_0(VALUE klass, ID id, int exclude, int recurse, int visibility)
{
VALUE tmp;
int mod_retry = 0;
rb_const_entry_t *ce;
tmp = klass;
retry:
while (tmp) {
if ((ce = rb_const_lookup(tmp, id))) {
if (visibility && RB_CONST_PRIVATE_P(ce)) {
return (int)Qfalse;
}
if (UNDEF_P(ce->value) && !check_autoload_required(tmp, id, 0) &&
!rb_autoloading_value(tmp, id, NULL, NULL))
return (int)Qfalse;
if (exclude && tmp == rb_cObject && klass != rb_cObject) {
return (int)Qfalse;
}
return (int)Qtrue;
}
if (!recurse) break;
tmp = RCLASS_SUPER(tmp);
}
if (!exclude && !mod_retry && BUILTIN_TYPE(klass) == T_MODULE) {
mod_retry = 1;
tmp = rb_cObject;
goto retry;
}
return (int)Qfalse;
}
int
rb_const_defined_from(VALUE klass, ID id)
{
return rb_const_defined_0(klass, id, TRUE, TRUE, FALSE);
}
int
rb_const_defined(VALUE klass, ID id)
{
return rb_const_defined_0(klass, id, FALSE, TRUE, FALSE);
}
int
rb_const_defined_at(VALUE klass, ID id)
{
return rb_const_defined_0(klass, id, TRUE, FALSE, FALSE);
}
int
rb_public_const_defined_from(VALUE klass, ID id)
{
return rb_const_defined_0(klass, id, TRUE, TRUE, TRUE);
}
static void
check_before_mod_set(VALUE klass, ID id, VALUE val, const char *dest)
{
rb_check_frozen(klass);
}
static void set_namespace_path(VALUE named_namespace, VALUE name);
static enum rb_id_table_iterator_result
set_namespace_path_i(ID id, VALUE v, void *payload)
{
rb_const_entry_t *ce = (rb_const_entry_t *)v;
VALUE value = ce->value;
VALUE parental_path = *((VALUE *) payload);
if (!rb_is_const_id(id) || !rb_namespace_p(value)) {
return ID_TABLE_CONTINUE;
}
bool has_permanent_classpath;
classname(value, &has_permanent_classpath);
if (has_permanent_classpath) {
return ID_TABLE_CONTINUE;
}
set_namespace_path(value, build_const_path(parental_path, id));
if (!RCLASS_EXT(value)->permanent_classpath) {
RCLASS_SET_CLASSPATH(value, 0, false);
}
return ID_TABLE_CONTINUE;
}
/*
* Assign permanent classpaths to all namespaces that are directly or indirectly
* nested under +named_namespace+. +named_namespace+ must have a permanent
* classpath.
*/
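/*
* An illustrative Ruby-level effect (a sketch; `Outer` and `Inner` are
* placeholder names): once a namespace gains a permanent name, everything
* nested under it is named as well.
*
*     m = Module.new
*     m.const_set(:Inner, Module.new)
*     Outer = m
*     m.name          #=> "Outer"
*     m::Inner.name   #=> "Outer::Inner"
*/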
static void
set_namespace_path(VALUE named_namespace, VALUE namespace_path)
{
struct rb_id_table *const_table = RCLASS_CONST_TBL(named_namespace);
RB_VM_LOCK_ENTER();
{
RCLASS_SET_CLASSPATH(named_namespace, namespace_path, true);
if (const_table) {
rb_id_table_foreach(const_table, set_namespace_path_i, &namespace_path);
}
}
RB_VM_LOCK_LEAVE();
}
static void
const_added(VALUE klass, ID const_name)
{
if (GET_VM()->running) {
VALUE name = ID2SYM(const_name);
rb_funcallv(klass, idConst_added, 1, &name);
}
}
static void
const_set(VALUE klass, ID id, VALUE val)
{
rb_const_entry_t *ce;
if (NIL_P(klass)) {
rb_raise(rb_eTypeError, "no class/module to define constant %"PRIsVALUE"",
QUOTE_ID(id));
}
if (!rb_ractor_main_p() && !rb_ractor_shareable_p(val)) {
rb_raise(rb_eRactorIsolationError, "can not set constants with non-shareable objects by non-main Ractors");
}
check_before_mod_set(klass, id, val, "constant");
RB_VM_LOCK_ENTER();
{
struct rb_id_table *tbl = RCLASS_CONST_TBL(klass);
if (!tbl) {
RCLASS_CONST_TBL(klass) = tbl = rb_id_table_create(0);
rb_clear_constant_cache_for_id(id);
ce = ZALLOC(rb_const_entry_t);
rb_id_table_insert(tbl, id, (VALUE)ce);
setup_const_entry(ce, klass, val, CONST_PUBLIC);
}
else {
struct autoload_const ac = {
.module = klass, .name = id,
.value = val, .flag = CONST_PUBLIC,
/* fill the rest with 0 */
};
ac.file = rb_source_location(&ac.line);
const_tbl_update(&ac, false);
}
}
RB_VM_LOCK_LEAVE();
/*
* Resolve and cache class name immediately to resolve ambiguity
* and avoid order-dependency on const_tbl
*/
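/*
* An illustrative consequence (a sketch; `First` and `Second` are placeholder
* names): the first constant assignment determines the permanent name.
*
*     m = Module.new
*     First = m
*     Second = m
*     m.name   #=> "First"
*/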
if (rb_cObject && rb_namespace_p(val)) {
bool val_path_permanent;
VALUE val_path = classname(val, &val_path_permanent);
if (NIL_P(val_path) || !val_path_permanent) {
if (klass == rb_cObject) {
set_namespace_path(val, rb_id2str(id));
}
else {
bool parental_path_permanent;
VALUE parental_path = classname(klass, &parental_path_permanent);
if (NIL_P(parental_path)) {
bool throwaway;
parental_path = rb_tmp_class_path(klass, &throwaway, make_temporary_path);
}
if (parental_path_permanent && !val_path_permanent) {
set_namespace_path(val, build_const_path(parental_path, id));
}
else if (!parental_path_permanent && NIL_P(val_path)) {
RCLASS_SET_CLASSPATH(val, build_const_path(parental_path, id), false);
}
}
}
}
}
void
rb_const_set(VALUE klass, ID id, VALUE val)
{
const_set(klass, id, val);
const_added(klass, id);
}
static struct autoload_data *
autoload_data_for_named_constant(VALUE module, ID name, struct autoload_const **autoload_const_pointer)
{
VALUE autoload_data_value = autoload_data(module, name);
if (!autoload_data_value) return 0;
struct autoload_data *autoload_data = get_autoload_data(autoload_data_value, autoload_const_pointer);
if (!autoload_data) return 0;
/* for the thread performing the autoload, keep the defined value in the autoload storage */
if (autoload_by_current(autoload_data)) {
return autoload_data;
}
return 0;
}
static void
const_tbl_update(struct autoload_const *ac, int autoload_force)
{
VALUE value;
VALUE klass = ac->module;
VALUE val = ac->value;
ID id = ac->name;
struct rb_id_table *tbl = RCLASS_CONST_TBL(klass);
rb_const_flag_t visibility = ac->flag;
rb_const_entry_t *ce;
if (rb_id_table_lookup(tbl, id, &value)) {
ce = (rb_const_entry_t *)value;
if (UNDEF_P(ce->value)) {
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
VALUE file = ac->file;
int line = ac->line;
struct autoload_data *ele = autoload_data_for_named_constant(klass, id, &ac);
if (!autoload_force && ele) {
rb_clear_constant_cache_for_id(id);
ac->value = val; /* autoload_data is non-WB-protected */
ac->file = rb_source_location(&ac->line);
}
else {
/* otherwise the autoloaded constant is being overridden: remove the autoload and define it directly */
autoload_delete(klass, id);
ce->flag = visibility;
RB_OBJ_WRITE(klass, &ce->value, val);
RB_OBJ_WRITE(klass, &ce->file, file);
ce->line = line;
}
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
return;
}
else {
VALUE name = QUOTE_ID(id);
visibility = ce->flag;
if (klass == rb_cObject)
rb_warn("already initialized constant %"PRIsVALUE"", name);
else
rb_warn("already initialized constant %"PRIsVALUE"::%"PRIsVALUE"",
rb_class_name(klass), name);
if (!NIL_P(ce->file) && ce->line) {
rb_compile_warn(RSTRING_PTR(ce->file), ce->line,
"previous definition of %"PRIsVALUE" was here", name);
}
}
rb_clear_constant_cache_for_id(id);
setup_const_entry(ce, klass, val, visibility);
}
else {
rb_clear_constant_cache_for_id(id);
ce = ZALLOC(rb_const_entry_t);
rb_id_table_insert(tbl, id, (VALUE)ce);
setup_const_entry(ce, klass, val, visibility);
}
}
static void
setup_const_entry(rb_const_entry_t *ce, VALUE klass, VALUE val,
rb_const_flag_t visibility)
{
ce->flag = visibility;
RB_OBJ_WRITE(klass, &ce->value, val);
RB_OBJ_WRITE(klass, &ce->file, rb_source_location(&ce->line));
}
void
rb_define_const(VALUE klass, const char *name, VALUE val)
{
ID id = rb_intern(name);
if (!rb_is_const_id(id)) {
rb_warn("rb_define_const: invalid name '%s' for constant", name);
}
if (!RB_SPECIAL_CONST_P(val)) {
rb_vm_register_global_object(val);
}
rb_const_set(klass, id, val);
}
void
rb_define_global_const(const char *name, VALUE val)
{
rb_define_const(rb_cObject, name, val);
}
static void
set_const_visibility(VALUE mod, int argc, const VALUE *argv,
rb_const_flag_t flag, rb_const_flag_t mask)
{
int i;
rb_const_entry_t *ce;
ID id;
rb_class_modify_check(mod);
if (argc == 0) {
rb_warning("%"PRIsVALUE" with no argument is just ignored",
QUOTE_ID(rb_frame_callee()));
return;
}
for (i = 0; i < argc; i++) {
struct autoload_const *ac;
VALUE val = argv[i];
id = rb_check_id(&val);
if (!id) {
undefined_constant(mod, val);
}
if ((ce = rb_const_lookup(mod, id))) {
ce->flag &= ~mask;
ce->flag |= flag;
if (UNDEF_P(ce->value)) {
struct autoload_data *ele;
ele = autoload_data_for_named_constant(mod, id, &ac);
if (ele) {
ac->flag &= ~mask;
ac->flag |= flag;
}
}
rb_clear_constant_cache_for_id(id);
}
else {
undefined_constant(mod, ID2SYM(id));
}
}
}
void
rb_deprecate_constant(VALUE mod, const char *name)
{
rb_const_entry_t *ce;
ID id;
long len = strlen(name);
rb_class_modify_check(mod);
if (!(id = rb_check_id_cstr(name, len, NULL))) {
undefined_constant(mod, rb_fstring_new(name, len));
}
if (!(ce = rb_const_lookup(mod, id))) {
undefined_constant(mod, ID2SYM(id));
}
ce->flag |= CONST_DEPRECATED;
}
/*
* call-seq:
* mod.private_constant(symbol, ...) => mod
*
* Makes a list of existing constants private.
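*
* For example (an illustrative sketch; +Example+ and +TOKEN+ are placeholder
* names):
*
*    module Example
*      TOKEN = "secret"
*      private_constant :TOKEN
*    end
*    Example::TOKEN   # raises NameError (private constant)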
*/
VALUE
rb_mod_private_constant(int argc, const VALUE *argv, VALUE obj)
{
set_const_visibility(obj, argc, argv, CONST_PRIVATE, CONST_VISIBILITY_MASK);
return obj;
}
/*
* call-seq:
* mod.public_constant(symbol, ...) => mod
*
* Makes a list of existing constants public.
*/
VALUE
rb_mod_public_constant(int argc, const VALUE *argv, VALUE obj)
{
set_const_visibility(obj, argc, argv, CONST_PUBLIC, CONST_VISIBILITY_MASK);
return obj;
}
/*
* call-seq:
* mod.deprecate_constant(symbol, ...) => mod
*
* Makes a list of existing constants deprecated. Attempting
* to refer to them will produce a warning.
*
* module HTTP
* NotFound = Exception.new
* NOT_FOUND = NotFound # previous version of the library used this name
*
* deprecate_constant :NOT_FOUND
* end
*
* HTTP::NOT_FOUND
* # warning: constant HTTP::NOT_FOUND is deprecated
*
*/
VALUE
rb_mod_deprecate_constant(int argc, const VALUE *argv, VALUE obj)
{
set_const_visibility(obj, argc, argv, CONST_DEPRECATED, CONST_DEPRECATED);
return obj;
}
static VALUE
original_module(VALUE c)
{
if (RB_TYPE_P(c, T_ICLASS))
return RBASIC(c)->klass;
return c;
}
static int
cvar_lookup_at(VALUE klass, ID id, st_data_t *v)
{
if (RB_TYPE_P(klass, T_ICLASS)) {
if (FL_TEST_RAW(klass, RICLASS_IS_ORIGIN)) {
return 0;
}
else {
// check the original module
klass = RBASIC(klass)->klass;
}
}
VALUE n = rb_ivar_lookup(klass, id, Qundef);
if (UNDEF_P(n)) return 0;
if (v) *v = n;
return 1;
}
static VALUE
cvar_front_klass(VALUE klass)
{
if (RCLASS_SINGLETON_P(klass)) {
VALUE obj = RCLASS_ATTACHED_OBJECT(klass);
if (rb_namespace_p(obj)) {
return obj;
}
}
return RCLASS_SUPER(klass);
}
static void
cvar_overtaken(VALUE front, VALUE target, ID id)
{
if (front && target != front) {
if (original_module(front) != original_module(target)) {
rb_raise(rb_eRuntimeError,
"class variable % "PRIsVALUE" of %"PRIsVALUE" is overtaken by %"PRIsVALUE"",
ID2SYM(id), rb_class_name(original_module(front)),
rb_class_name(original_module(target)));
}
if (BUILTIN_TYPE(front) == T_CLASS) {
rb_ivar_delete(front, id, Qundef);
}
}
}
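/*
* An illustrative Ruby-level trigger (a sketch; `Parent`, `Child` and
* `@@state` are placeholder names): once the same class variable is visible
* from two places in the ancestry, touching it raises.
*
*     class Parent; end
*     class Child < Parent
*       @@state = 1
*     end
*     class Parent
*       @@state = 2
*     end
*     class Child
*       @@state   # RuntimeError: ... @@state of Child is overtaken by Parent
*     end
*/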
#define CVAR_FOREACH_ANCESTORS(klass, v, r) \
for (klass = cvar_front_klass(klass); klass; klass = RCLASS_SUPER(klass)) { \
if (cvar_lookup_at(klass, id, (v))) { \
r; \
} \
}
#define CVAR_LOOKUP(v,r) do {\
CVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(); \
if (cvar_lookup_at(klass, id, (v))) {r;}\
CVAR_FOREACH_ANCESTORS(klass, v, r);\
} while(0)
static VALUE
find_cvar(VALUE klass, VALUE * front, VALUE * target, ID id)
{
VALUE v = Qundef;
CVAR_LOOKUP(&v, {
if (!*front) {
*front = klass;
}
*target = klass;
});
return v;
}
static void
check_for_cvar_table(VALUE subclass, VALUE key)
{
// Must not check ivar on ICLASS
if (!RB_TYPE_P(subclass, T_ICLASS) && RTEST(rb_ivar_defined(subclass, key))) {
RB_DEBUG_COUNTER_INC(cvar_class_invalidate);
ruby_vm_global_cvar_state++;
return;
}
rb_class_foreach_subclass(subclass, check_for_cvar_table, key);
}
void
rb_cvar_set(VALUE klass, ID id, VALUE val)
{
VALUE tmp, front = 0, target = 0;
tmp = klass;
CVAR_LOOKUP(0, {if (!front) front = klass; target = klass;});
if (target) {
cvar_overtaken(front, target, id);
}
else {
target = tmp;
}
if (RB_TYPE_P(target, T_ICLASS)) {
target = RBASIC(target)->klass;
}
check_before_mod_set(target, id, val, "class variable");
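    // rb_class_ivar_set() reports whether the variable already existed on
    // target (1) or was newly added (0); the CVC (class variable cache)
    // table below maps the cvar ID to its owning class and the global cvar
    // state observed at the time of this write.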
int result = rb_class_ivar_set(target, id, val);
struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(target);
if (!rb_cvc_tbl) {
rb_cvc_tbl = RCLASS_CVC_TBL(target) = rb_id_table_create(2);
}
struct rb_cvar_class_tbl_entry *ent;
VALUE ent_data;
if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
ent = ALLOC(struct rb_cvar_class_tbl_entry);
ent->class_value = target;
ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
ent->cref = 0;
rb_id_table_insert(rb_cvc_tbl, id, (VALUE)ent);
RB_DEBUG_COUNTER_INC(cvar_inline_miss);
}
else {
ent = (void *)ent_data;
ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
}
// Break the cvar cache if this is a new class variable
// and target is a module or a subclass with the same
// cvar in this lookup.
if (result == 0) {
if (RB_TYPE_P(target, T_CLASS)) {
if (RCLASS_SUBCLASSES(target)) {
rb_class_foreach_subclass(target, check_for_cvar_table, id);
}
}
}
}
VALUE
rb_cvar_find(VALUE klass, ID id, VALUE *front)
{
VALUE target = 0;
VALUE value;
value = find_cvar(klass, front, &target, id);
if (!target) {
rb_name_err_raise("uninitialized class variable %1$s in %2$s",
klass, ID2SYM(id));
}
cvar_overtaken(*front, target, id);
return (VALUE)value;
}
VALUE
rb_cvar_get(VALUE klass, ID id)
{
VALUE front = 0;
return rb_cvar_find(klass, id, &front);
}
VALUE
rb_cvar_defined(VALUE klass, ID id)
{
if (!klass) return Qfalse;
CVAR_LOOKUP(0,return Qtrue);
return Qfalse;
}
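/*
 * A minimal usage sketch of the ID-based class variable API above, as it
 * might appear in a C extension (the class and names here are
 * hypothetical, not part of this file):
 *
 *    VALUE widget = rb_define_class("Widget", rb_cObject);
 *    ID id = rb_intern("@@config");
 *
 *    rb_cvar_set(widget, id, rb_str_new_cstr("default"));
 *    if (RTEST(rb_cvar_defined(widget, id))) {
 *        VALUE config = rb_cvar_get(widget, id);   // => "default"
 *    }
 */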
static ID
cv_intern(VALUE klass, const char *name)
{
ID id = rb_intern(name);
if (!rb_is_class_id(id)) {
rb_name_err_raise("wrong class variable name %1$s",
klass, rb_str_new_cstr(name));
}
return id;
}
void
rb_cv_set(VALUE klass, const char *name, VALUE val)
{
ID id = cv_intern(klass, name);
rb_cvar_set(klass, id, val);
}
VALUE
rb_cv_get(VALUE klass, const char *name)
{
ID id = cv_intern(klass, name);
return rb_cvar_get(klass, id);
}
void
rb_define_class_variable(VALUE klass, const char *name, VALUE val)
{
rb_cv_set(klass, name, val);
}
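/*
 * The rb_cv_* wrappers above take a C string, validate it as a class
 * variable name, and delegate to the ID-based API.  A minimal sketch
 * (hypothetical class, not part of this file):
 *
 *    rb_cv_set(klass, "@@counter", INT2FIX(0));
 *    VALUE n = rb_cv_get(klass, "@@counter");            // => 0
 *    rb_define_class_variable(klass, "@@limit", INT2FIX(100));
 */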
static int
cv_i(ID key, VALUE v, st_data_t a)
{
st_table *tbl = (st_table *)a;
if (rb_is_class_id(key)) {
st_update(tbl, (st_data_t)key, cv_i_update, 0);
}
return ST_CONTINUE;
}
static void*
mod_cvar_at(VALUE mod, void *data)
{
st_table *tbl = data;
if (!tbl) {
tbl = st_init_numtable();
}
mod = original_module(mod);
rb_ivar_foreach(mod, cv_i, (st_data_t)tbl);
return tbl;
}
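/* Collects class variable names for Module#class_variables with
 * inherit=true: walks mod and its superclass chain, and for a singleton
 * class whose attached object is a class or module also starts from that
 * object (via cvar_front_klass). */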
static void*
mod_cvar_of(VALUE mod, void *data)
{
VALUE tmp = mod;
if (RCLASS_SINGLETON_P(mod)) {
if (rb_namespace_p(RCLASS_ATTACHED_OBJECT(mod))) {
data = mod_cvar_at(tmp, data);
tmp = cvar_front_klass(tmp);
}
}
for (;;) {
data = mod_cvar_at(tmp, data);
tmp = RCLASS_SUPER(tmp);
if (!tmp) break;
}
return data;
}
static int
cv_list_i(st_data_t key, st_data_t value, VALUE ary)
{
ID sym = (ID)key;
rb_ary_push(ary, ID2SYM(sym));
return ST_CONTINUE;
}
static VALUE
cvar_list(void *data)
{
st_table *tbl = data;
VALUE ary;
if (!tbl) return rb_ary_new2(0);
ary = rb_ary_new2(tbl->num_entries);
st_foreach_safe(tbl, cv_list_i, ary);
st_free_table(tbl);
return ary;
}
/*
* call-seq:
* mod.class_variables(inherit=true) -> array
*
* Returns an array of the names of class variables in <i>mod</i>.
* This includes the names of class variables in any included
* modules, unless the <i>inherit</i> parameter is set to
* <code>false</code>.
*
* class One
* @@var1 = 1
* end
* class Two < One
* @@var2 = 2
* end
* One.class_variables #=> [:@@var1]
* Two.class_variables #=> [:@@var2, :@@var1]
* Two.class_variables(false) #=> [:@@var2]
*/
VALUE
rb_mod_class_variables(int argc, const VALUE *argv, VALUE mod)
{
bool inherit = true;
st_table *tbl;
if (rb_check_arity(argc, 0, 1)) inherit = RTEST(argv[0]);
if (inherit) {
tbl = mod_cvar_of(mod, 0);
}
else {
tbl = mod_cvar_at(mod, 0);
}
return cvar_list(tbl);
}
/*
* call-seq:
* remove_class_variable(sym) -> obj
*
* Removes the named class variable from the receiver, returning that
* variable's value.
*
* class Example
* @@var = 99
* puts remove_class_variable(:@@var)
* p(defined? @@var)
* end
*
* <em>produces:</em>
*
* 99
* nil
*/
VALUE
rb_mod_remove_cvar(VALUE mod, VALUE name)
{
const ID id = id_for_var_message(mod, name, class, "wrong class variable name %1$s");
st_data_t val;
if (!id) {
goto not_defined;
}
rb_check_frozen(mod);
val = rb_ivar_delete(mod, id, Qundef);
if (!UNDEF_P(val)) {
return (VALUE)val;
}
if (rb_cvar_defined(mod, id)) {
rb_name_err_raise("cannot remove %1$s for %2$s", mod, ID2SYM(id));
}
not_defined:
rb_name_err_raise("class variable %1$s not defined for %2$s",
mod, name);
UNREACHABLE_RETURN(Qundef);
}
VALUE
rb_iv_get(VALUE obj, const char *name)
{
ID id = rb_check_id_cstr(name, strlen(name), rb_usascii_encoding());
if (!id) {
return Qnil;
}
return rb_ivar_get(obj, id);
}
VALUE
rb_iv_set(VALUE obj, const char *name, VALUE val)
{
ID id = rb_intern(name);
return rb_ivar_set(obj, id, val);
}
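/*
 * A minimal sketch of the C-string instance variable helpers above
 * (hypothetical object, not part of this file).  rb_iv_get() returns
 * Qnil instead of raising when the name was never interned:
 *
 *    rb_iv_set(obj, "@name", rb_str_new_cstr("example"));
 *    VALUE name = rb_iv_get(obj, "@name");     // => "example"
 *    VALUE missing = rb_iv_get(obj, "@unset"); // Qnil
 */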
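/* The class_ivar_set_* callbacks below parameterize general_ivar_set()
 * for T_CLASS/T_MODULE objects, whose instance variables live in
 * RCLASS_IVPTR (or in RCLASS_IV_HASH once the shape becomes too complex)
 * rather than in the object body. */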
static VALUE *
class_ivar_set_shape_ivptr(VALUE obj, void *_data)
{
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
return RCLASS_IVPTR(obj);
}
static void
class_ivar_set_shape_resize_ivptr(VALUE obj, attr_index_t _old_capa, attr_index_t new_capa, void *_data)
{
REALLOC_N(RCLASS_IVPTR(obj), VALUE, new_capa);
}
static void
class_ivar_set_set_shape(VALUE obj, rb_shape_t *shape, void *_data)
{
rb_shape_set_shape(obj, shape);
}
static void
class_ivar_set_transition_too_complex(VALUE obj, void *_data)
{
rb_evict_ivars_to_hash(obj);
}
static st_table *
class_ivar_set_too_complex_table(VALUE obj, void *_data)
{
RUBY_ASSERT(rb_shape_obj_too_complex(obj));
return RCLASS_IV_HASH(obj);
}
int
rb_class_ivar_set(VALUE obj, ID id, VALUE val)
{
RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE));
bool existing = false;
rb_check_frozen(obj);
RB_VM_LOCK_ENTER();
{
existing = general_ivar_set(obj, id, val, NULL,
class_ivar_set_shape_ivptr,
class_ivar_set_shape_resize_ivptr,
class_ivar_set_set_shape,
class_ivar_set_transition_too_complex,
class_ivar_set_too_complex_table).existing;
}
RB_VM_LOCK_LEAVE();
return existing;
}
static int
tbl_copy_i(ID key, VALUE val, st_data_t dest)
{
rb_class_ivar_set((VALUE)dest, key, val);
return ST_CONTINUE;
}
void
rb_iv_tbl_copy(VALUE dst, VALUE src)
{
RUBY_ASSERT(rb_type(dst) == rb_type(src));
RUBY_ASSERT(RB_TYPE_P(dst, T_CLASS) || RB_TYPE_P(dst, T_MODULE));
RUBY_ASSERT(rb_shape_get_shape(dst)->type == SHAPE_ROOT);
RUBY_ASSERT(!RCLASS_IVPTR(dst));
rb_ivar_foreach(src, tbl_copy_i, dst);
}
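/* Looks up the constant entry for id directly in klass's constant table
 * (no ancestor search), taking the VM lock around the id-table read;
 * returns NULL when the constant is not present. */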
rb_const_entry_t *
rb_const_lookup(VALUE klass, ID id)
{
struct rb_id_table *tbl = RCLASS_CONST_TBL(klass);
if (tbl) {
VALUE val;
bool r;
RB_VM_LOCK_ENTER();
{
r = rb_id_table_lookup(tbl, id, &val);
}
RB_VM_LOCK_LEAVE();
if (r) return (rb_const_entry_t *)val;
}
return NULL;
}