#include "vm_core.h"
#include "vm_sync.h"
#include "shape.h"
#include "symbol.h"
#include "id_table.h"
#include "internal/class.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/object.h"
#include "internal/symbol.h"
#include "internal/variable.h"
#include "variable.h"
#include <stdbool.h>

#ifndef _WIN32
#include <sys/mman.h>
#endif

#ifndef SHAPE_DEBUG
#define SHAPE_DEBUG (VM_CHECK_MODE > 0)
#endif

#if SIZEOF_SHAPE_T == 4
#if RUBY_DEBUG
#define SHAPE_BUFFER_SIZE 0x8000
#else
#define SHAPE_BUFFER_SIZE 0x80000
#endif
#else
#define SHAPE_BUFFER_SIZE 0x8000
#endif

#define REDBLACK_CACHE_SIZE (SHAPE_BUFFER_SIZE * 32)

/* This depends on the memory allocated by Ruby's allocator or mmap
 * never being located at an odd address. */
#define SINGLE_CHILD_TAG 0x1
#define TAG_SINGLE_CHILD(x) (struct rb_id_table *)((uintptr_t)(x) | SINGLE_CHILD_TAG)
#define SINGLE_CHILD_MASK (~((uintptr_t)SINGLE_CHILD_TAG))
#define SINGLE_CHILD_P(x) ((uintptr_t)(x) & SINGLE_CHILD_TAG)
#define SINGLE_CHILD(x) (rb_shape_t *)((uintptr_t)(x) & SINGLE_CHILD_MASK)
#define ANCESTOR_CACHE_THRESHOLD 10
#define MAX_SHAPE_ID (SHAPE_BUFFER_SIZE - 1)
#define ANCESTOR_SEARCH_MAX_DEPTH 2

static ID id_frozen;
static ID id_t_object;

#define LEAF 0
#define BLACK 0x0
#define RED 0x1
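/*
 * Notes on the ancestor cache used below: each shape can memoize its
 * SHAPE_IVAR ancestors in a persistent red-black tree keyed by the ivar
 * name (ID). Nodes come from a fixed, preallocated pool (shape_cache);
 * child links are stored as 1-based indexes into that pool, with 0 (LEAF)
 * meaning "no child", and a node's color lives in the low bit of its
 * value pointer.
 */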
static redblack_node_t *
redblack_left(redblack_node_t * node)
{
    if (node->l == LEAF) {
        return LEAF;
    }
    else {
        RUBY_ASSERT(node->l < GET_SHAPE_TREE()->cache_size);
        redblack_node_t * left = &GET_SHAPE_TREE()->shape_cache[node->l - 1];
        return left;
    }
}

static redblack_node_t *
redblack_right(redblack_node_t * node)
{
    if (node->r == LEAF) {
        return LEAF;
    }
    else {
        RUBY_ASSERT(node->r < GET_SHAPE_TREE()->cache_size);
        redblack_node_t * right = &GET_SHAPE_TREE()->shape_cache[node->r - 1];
        return right;
    }
}
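/* Binary search for `key` in the subtree rooted at `tree`. Returns the
 * matching node, or LEAF when the key is not present. */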
static redblack_node_t *
redblack_find(redblack_node_t * tree, ID key)
{
    if (tree == LEAF) {
        return LEAF;
    }
    else {
        RUBY_ASSERT(redblack_left(tree) == LEAF || redblack_left(tree)->key < tree->key);
        RUBY_ASSERT(redblack_right(tree) == LEAF || redblack_right(tree)->key > tree->key);

        if (tree->key == key) {
            return tree;
        }
        else {
            if (key < tree->key) {
                return redblack_find(redblack_left(tree), key);
            }
            else {
                return redblack_find(redblack_right(tree), key);
            }
        }
    }
}

static inline rb_shape_t *
redblack_value(redblack_node_t * node)
{
    // Color is stored in the bottom bit of the shape pointer
    // Mask away the bit so we get the actual pointer back
    return (rb_shape_t *)((uintptr_t)node->value & ~(uintptr_t)1);
}

#ifdef HAVE_MMAP
static inline char
redblack_color(redblack_node_t * node)
{
    return node && ((uintptr_t)node->value & RED);
}

static inline bool
redblack_red_p(redblack_node_t * node)
{
    return redblack_color(node) == RED;
}

static redblack_id_t
redblack_id_for(redblack_node_t * node)
{
    RUBY_ASSERT(node || node == LEAF);
    if (node == LEAF) {
        return 0;
    }
    else {
        redblack_node_t * redblack_nodes = GET_SHAPE_TREE()->shape_cache;
        redblack_id_t id = (redblack_id_t)(node - redblack_nodes);
        return id + 1;
    }
}

static redblack_node_t *
redblack_new(char color, ID key, rb_shape_t * value, redblack_node_t * left, redblack_node_t * right)
{
    if (GET_SHAPE_TREE()->cache_size + 1 >= REDBLACK_CACHE_SIZE) {
        // We're out of cache, just quit
        return LEAF;
    }

    RUBY_ASSERT(left == LEAF || left->key < key);
    RUBY_ASSERT(right == LEAF || right->key > key);

    redblack_node_t * redblack_nodes = GET_SHAPE_TREE()->shape_cache;
    redblack_node_t * node = &redblack_nodes[(GET_SHAPE_TREE()->cache_size)++];
    node->key = key;
    node->value = (rb_shape_t *)((uintptr_t)value | color);
    node->l = redblack_id_for(left);
    node->r = redblack_id_for(right);
    return node;
}
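/* Rebuild a subtree so the red-black invariants hold again: when a BLACK
 * node has two consecutive RED nodes below it (in any of the four possible
 * arrangements), the subtree is rewritten as a RED parent with two BLACK
 * children; otherwise the node is recreated unchanged. */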
static redblack_node_t *
redblack_balance(char color, ID key, rb_shape_t * value, redblack_node_t * left, redblack_node_t * right)
{
    if (color == BLACK) {
        ID new_key, new_left_key, new_right_key;
        rb_shape_t *new_value, *new_left_value, *new_right_value;
        redblack_node_t *new_left_left, *new_left_right, *new_right_left, *new_right_right;

        if (redblack_red_p(left) && redblack_red_p(redblack_left(left))) {
            new_right_key = key;
            new_right_value = value;
            new_right_right = right;

            new_key = left->key;
            new_value = redblack_value(left);
            new_right_left = redblack_right(left);

            new_left_key = redblack_left(left)->key;
            new_left_value = redblack_value(redblack_left(left));

            new_left_left = redblack_left(redblack_left(left));
            new_left_right = redblack_right(redblack_left(left));
        }
        else if (redblack_red_p(left) && redblack_red_p(redblack_right(left))) {
            new_right_key = key;
            new_right_value = value;
            new_right_right = right;

            new_left_key = left->key;
            new_left_value = redblack_value(left);
            new_left_left = redblack_left(left);

            new_key = redblack_right(left)->key;
            new_value = redblack_value(redblack_right(left));
            new_left_right = redblack_left(redblack_right(left));
            new_right_left = redblack_right(redblack_right(left));
        }
        else if (redblack_red_p(right) && redblack_red_p(redblack_left(right))) {
            new_left_key = key;
            new_left_value = value;
            new_left_left = left;

            new_right_key = right->key;
            new_right_value = redblack_value(right);
            new_right_right = redblack_right(right);

            new_key = redblack_left(right)->key;
            new_value = redblack_value(redblack_left(right));
            new_left_right = redblack_left(redblack_left(right));
            new_right_left = redblack_right(redblack_left(right));
        }
        else if (redblack_red_p(right) && redblack_red_p(redblack_right(right))) {
            new_left_key = key;
            new_left_value = value;
            new_left_left = left;

            new_key = right->key;
            new_value = redblack_value(right);
            new_left_right = redblack_left(right);

            new_right_key = redblack_right(right)->key;
            new_right_value = redblack_value(redblack_right(right));
            new_right_left = redblack_left(redblack_right(right));
            new_right_right = redblack_right(redblack_right(right));
        }
        else {
            return redblack_new(color, key, value, left, right);
        }

        RUBY_ASSERT(new_left_key < new_key);
        RUBY_ASSERT(new_right_key > new_key);
        RUBY_ASSERT(new_left_left == LEAF || new_left_left->key < new_left_key);
        RUBY_ASSERT(new_left_right == LEAF || new_left_right->key > new_left_key);
        RUBY_ASSERT(new_left_right == LEAF || new_left_right->key < new_key);
        RUBY_ASSERT(new_right_left == LEAF || new_right_left->key < new_right_key);
        RUBY_ASSERT(new_right_left == LEAF || new_right_left->key > new_key);
        RUBY_ASSERT(new_right_right == LEAF || new_right_right->key > new_right_key);

        return redblack_new(
            RED, new_key, new_value,
            redblack_new(BLACK, new_left_key, new_left_value, new_left_left, new_left_right),
            redblack_new(BLACK, new_right_key, new_right_value, new_right_left, new_right_right));
    }

    return redblack_new(color, key, value, left, right);
}

static redblack_node_t *
redblack_insert_aux(redblack_node_t * tree, ID key, rb_shape_t * value)
{
    if (tree == LEAF) {
        return redblack_new(RED, key, value, LEAF, LEAF);
    }
    else {
        redblack_node_t *left, *right;
        if (key < tree->key) {
            left = redblack_insert_aux(redblack_left(tree), key, value);
            RUBY_ASSERT(left != LEAF);
            right = redblack_right(tree);
            RUBY_ASSERT(right == LEAF || right->key > tree->key);
        }
        else if (key > tree->key) {
            left = redblack_left(tree);
            RUBY_ASSERT(left == LEAF || left->key < tree->key);
            right = redblack_insert_aux(redblack_right(tree), key, value);
            RUBY_ASSERT(right != LEAF);
        }
        else {
            return tree;
        }

        return redblack_balance(
            redblack_color(tree),
            tree->key,
            redblack_value(tree),
            left,
            right
        );
    }
}

static redblack_node_t *
redblack_force_black(redblack_node_t * node)
{
    node->value = redblack_value(node);
    return node;
}
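/* Insert (key, value) and return the new root. The root is forced to BLACK
 * when the bottom-up rebalancing in redblack_insert_aux left it RED, which
 * keeps the tree a valid red-black tree. */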
static redblack_node_t *
redblack_insert(redblack_node_t * tree, ID key, rb_shape_t * value)
{
    redblack_node_t * root = redblack_insert_aux(tree, key, value);

    if (redblack_red_p(root)) {
        return redblack_force_black(root);
    }
    else {
        return root;
    }
}
#endif

rb_shape_tree_t *rb_shape_tree_ptr = NULL;

/*
 * Shape getters
 */
rb_shape_t *
rb_shape_get_root_shape(void)
{
    return GET_SHAPE_TREE()->root_shape;
}

shape_id_t
rb_shape_id(rb_shape_t * shape)
{
    return (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
}

void
rb_shape_each_shape(each_shape_callback callback, void *data)
{
    rb_shape_t *cursor = rb_shape_get_root_shape();
    rb_shape_t *end = rb_shape_get_shape_by_id(GET_SHAPE_TREE()->next_shape_id);
    while (cursor < end) {
        callback(cursor, data);
        cursor += 1;
    }
}

RUBY_FUNC_EXPORTED rb_shape_t *
rb_shape_get_shape_by_id(shape_id_t shape_id)
{
    RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);

    rb_shape_t *shape = &GET_SHAPE_TREE()->shape_list[shape_id];
    return shape;
}

rb_shape_t *
rb_shape_get_parent(rb_shape_t * shape)
{
    return rb_shape_get_shape_by_id(shape->parent_id);
}

#if !SHAPE_IN_BASIC_FLAGS
shape_id_t rb_generic_shape_id(VALUE obj);
#endif
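/* Read the shape ID of `obj`. Special constants share one dedicated shape
 * ID; otherwise the ID comes from the object's basic flags when
 * SHAPE_IN_BASIC_FLAGS is set, and from per-type storage (T_OBJECT,
 * T_CLASS/T_MODULE, or the generic ivar table) when it is not. */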
RUBY_FUNC_EXPORTED shape_id_t
rb_shape_get_shape_id(VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        return SPECIAL_CONST_SHAPE_ID;
    }

#if SHAPE_IN_BASIC_FLAGS
    return RBASIC_SHAPE_ID(obj);
#else
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        return ROBJECT_SHAPE_ID(obj);
        break;
      case T_CLASS:
      case T_MODULE:
        return RCLASS_SHAPE_ID(obj);
      default:
        return rb_generic_shape_id(obj);
    }
#endif
}

size_t
rb_shape_depth(rb_shape_t * shape)
{
    size_t depth = 1;

    while (shape->parent_id != INVALID_SHAPE_ID) {
        depth++;
        shape = rb_shape_get_parent(shape);
    }

    return depth;
}

rb_shape_t*
rb_shape_get_shape(VALUE obj)
{
    return rb_shape_get_shape_by_id(rb_shape_get_shape_id(obj));
}
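/* Reserve the next slot in the preallocated shape list. A shape's ID is its
 * offset into that list, so allocation just bumps next_shape_id; going past
 * MAX_SHAPE_ID is a bug here because get_next_shape_internal checks the
 * limit first and falls back to OBJ_TOO_COMPLEX_SHAPE_ID. */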
static rb_shape_t *
shape_alloc(void)
{
    shape_id_t shape_id = GET_SHAPE_TREE()->next_shape_id;
    GET_SHAPE_TREE()->next_shape_id++;

    if (shape_id == (MAX_SHAPE_ID + 1)) {
        // TODO: Make an OutOfShapesError ??
        rb_bug("Out of shapes");
    }

    return &GET_SHAPE_TREE()->shape_list[shape_id];
}

static rb_shape_t *
rb_shape_alloc_with_parent_id(ID edge_name, shape_id_t parent_id)
{
    rb_shape_t * shape = shape_alloc();

    shape->edge_name = edge_name;
    shape->next_iv_index = 0;
    shape->parent_id = parent_id;
    shape->edges = NULL;

    return shape;
}

static rb_shape_t *
rb_shape_alloc(ID edge_name, rb_shape_t * parent, enum shape_type type)
{
    rb_shape_t * shape = rb_shape_alloc_with_parent_id(edge_name, rb_shape_id(parent));
    shape->type = (uint8_t)type;
    shape->heap_index = parent->heap_index;
    shape->capacity = parent->capacity;
    shape->edges = 0;
    return shape;
}

#ifdef HAVE_MMAP
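/* Build (and memoize in shape->ancestor_index) the red-black tree of this
 * shape's SHAPE_IVAR ancestors keyed by edge name. The parent's tree is
 * computed first and extended; non-IVAR shapes simply reuse their parent's
 * tree. Without HAVE_MMAP the cache is disabled and always returns LEAF. */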
static redblack_node_t *
redblack_cache_ancestors(rb_shape_t * shape)
{
    if (!(shape->ancestor_index || shape->parent_id == INVALID_SHAPE_ID)) {
        redblack_node_t * parent_index;

        parent_index = redblack_cache_ancestors(rb_shape_get_parent(shape));

        if (shape->type == SHAPE_IVAR) {
            shape->ancestor_index = redblack_insert(parent_index, shape->edge_name, shape);

#if RUBY_DEBUG
            if (shape->ancestor_index) {
                redblack_node_t *inserted_node = redblack_find(shape->ancestor_index, shape->edge_name);
                RUBY_ASSERT(inserted_node);
                RUBY_ASSERT(redblack_value(inserted_node) == shape);
            }
#endif
        }
        else {
            shape->ancestor_index = parent_index;
        }
    }

    return shape->ancestor_index;
}
#else
static redblack_node_t *
redblack_cache_ancestors(rb_shape_t * shape)
{
    return LEAF;
}
#endif

static rb_shape_t *
rb_shape_alloc_new_child(ID id, rb_shape_t * shape, enum shape_type shape_type)
{
    rb_shape_t * new_shape = rb_shape_alloc(id, shape, shape_type);

    switch (shape_type) {
      case SHAPE_IVAR:
        if (UNLIKELY(shape->next_iv_index >= shape->capacity)) {
            RUBY_ASSERT(shape->next_iv_index == shape->capacity);
            new_shape->capacity = (uint32_t)rb_malloc_grow_capa(shape->capacity, sizeof(VALUE));
        }
        RUBY_ASSERT(new_shape->capacity > shape->next_iv_index);
        new_shape->next_iv_index = shape->next_iv_index + 1;
        if (new_shape->next_iv_index > ANCESTOR_CACHE_THRESHOLD) {
            redblack_cache_ancestors(new_shape);
        }
        break;
      case SHAPE_FROZEN:
        new_shape->next_iv_index = shape->next_iv_index;
        break;
      case SHAPE_OBJ_TOO_COMPLEX:
      case SHAPE_ROOT:
      case SHAPE_T_OBJECT:
        rb_bug("Unreachable");
        break;
    }

    return new_shape;
}
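/* Look up (or create) the child of `shape` reached through edge `id` with
 * the given transition type, under the VM lock. Edges are stored either as
 * a single tagged child pointer or, once a shape has several children, as
 * an id_table. When the child is missing and either new variations are not
 * allowed or the shape buffer is exhausted, the too-complex shape is
 * returned instead. *variation_created reports whether a second (or later)
 * child edge was added to `shape`. */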
static rb_shape_t*
get_next_shape_internal(rb_shape_t * shape, ID id, enum shape_type shape_type, bool * variation_created, bool new_variations_allowed)
{
    rb_shape_t *res = NULL;

    // There should never be outgoing edges from "too complex"
    RUBY_ASSERT(rb_shape_id(shape) != OBJ_TOO_COMPLEX_SHAPE_ID);

    *variation_created = false;

    RB_VM_LOCK_ENTER();
    {
        // If the current shape has children
        if (shape->edges) {
            // Check if it only has one child
            if (SINGLE_CHILD_P(shape->edges)) {
                rb_shape_t * child = SINGLE_CHILD(shape->edges);
                // If the one child has a matching edge name, then great,
                // we found what we want.
                if (child->edge_name == id) {
                    res = child;
                }
            }
            else {
                // If it has more than one child, do a hash lookup to find it.
                VALUE lookup_result;
                if (rb_id_table_lookup(shape->edges, id, &lookup_result)) {
                    res = (rb_shape_t *)lookup_result;
                }
            }
        }

        // If we didn't find the shape we're looking for we create it.
        if (!res) {
            // If we're not allowed to create a new variation, or if we're out of shapes,
            // we return TOO_COMPLEX_SHAPE.
            if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
                res = rb_shape_get_shape_by_id(OBJ_TOO_COMPLEX_SHAPE_ID);
            }
            else {
                rb_shape_t * new_shape = rb_shape_alloc_new_child(id, shape, shape_type);

                if (!shape->edges) {
                    // If the shape had no edge yet, we can directly set the new child
                    shape->edges = TAG_SINGLE_CHILD(new_shape);
                }
                else {
                    // If the edge was single child we need to allocate a table.
                    if (SINGLE_CHILD_P(shape->edges)) {
                        rb_shape_t * old_child = SINGLE_CHILD(shape->edges);
                        shape->edges = rb_id_table_create(2);
                        rb_id_table_insert(shape->edges, old_child->edge_name, (VALUE)old_child);
                    }

                    rb_id_table_insert(shape->edges, new_shape->edge_name, (VALUE)new_shape);
                    *variation_created = true;
                }

                res = new_shape;
            }
        }
    }
    RB_VM_LOCK_LEAVE();

    return res;
}

int
rb_shape_frozen_shape_p(rb_shape_t* shape)
{
    return SHAPE_FROZEN == (enum shape_type)shape->type;
}
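/* Walk up from `shape` looking for the SHAPE_IVAR transition that added
 * `id`. If found, store it in *removed_shape and return a shape equivalent
 * to the original minus that ivar, rebuilt by replaying the remaining
 * transitions on top of the removed shape's parent. Returns NULL when the
 * ivar is not part of this shape's history. */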
static rb_shape_t *
remove_shape_recursive(rb_shape_t *shape, ID id, rb_shape_t **removed_shape)
{
    if (shape->parent_id == INVALID_SHAPE_ID) {
        // We've hit the top of the shape tree and couldn't find the
        // IV we wanted to remove, so return NULL
        return NULL;
    }
    else {
        if (shape->type == SHAPE_IVAR && shape->edge_name == id) {
            *removed_shape = shape;

            return rb_shape_get_parent(shape);
        }
        else {
            // This isn't the IV we want to remove, keep walking up.
            rb_shape_t *new_parent = remove_shape_recursive(rb_shape_get_parent(shape), id, removed_shape);

            // We found a new parent. Create a child of the new parent that
            // has the same attributes as this shape.
            if (new_parent) {
                if (UNLIKELY(new_parent->type == SHAPE_OBJ_TOO_COMPLEX)) {
                    return new_parent;
                }

                bool dont_care;
                rb_shape_t *new_child = get_next_shape_internal(new_parent, shape->edge_name, shape->type, &dont_care, true);
                if (UNLIKELY(new_child->type == SHAPE_OBJ_TOO_COMPLEX)) {
                    return new_child;
                }

                RUBY_ASSERT(new_child->capacity <= shape->capacity);

                return new_child;
            }
            else {
                // We went all the way to the top of the shape tree and couldn't
                // find an IV to remove, so return NULL
                return NULL;
            }
        }
    }
}

bool
rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape, VALUE *removed)
{
    if (UNLIKELY(shape->type == SHAPE_OBJ_TOO_COMPLEX)) {
        return false;
    }

    rb_shape_t *removed_shape = NULL;
    rb_shape_t *new_shape = remove_shape_recursive(shape, id, &removed_shape);
    if (new_shape) {
        RUBY_ASSERT(removed_shape != NULL);

        if (UNLIKELY(new_shape->type == SHAPE_OBJ_TOO_COMPLEX)) {
            return false;
        }

        RUBY_ASSERT(new_shape->next_iv_index == shape->next_iv_index - 1);

        VALUE *ivptr;
        switch(BUILTIN_TYPE(obj)) {
          case T_CLASS:
          case T_MODULE:
            ivptr = RCLASS_IVPTR(obj);
            break;
          case T_OBJECT:
            ivptr = ROBJECT_IVPTR(obj);
            break;
          default: {
            struct gen_ivtbl *ivtbl;
            rb_gen_ivtbl_get(obj, id, &ivtbl);
            ivptr = ivtbl->as.shape.ivptr;
            break;
          }
        }

        *removed = ivptr[removed_shape->next_iv_index - 1];

        memmove(&ivptr[removed_shape->next_iv_index - 1], &ivptr[removed_shape->next_iv_index],
                ((new_shape->next_iv_index + 1) - removed_shape->next_iv_index) * sizeof(VALUE));

        // Re-embed objects when instances become small enough
        // This is necessary because YJIT assumes that objects with the same shape
        // have the same embeddedness for efficiency (avoid extra checks)
        if (BUILTIN_TYPE(obj) == T_OBJECT &&
            !RB_FL_TEST_RAW(obj, ROBJECT_EMBED) &&
            rb_obj_embedded_size(new_shape->next_iv_index) <= rb_gc_obj_slot_size(obj)) {
            RB_FL_SET_RAW(obj, ROBJECT_EMBED);
            memcpy(ROBJECT_IVPTR(obj), ivptr, new_shape->next_iv_index * sizeof(VALUE));
            xfree(ivptr);
        }

        rb_shape_set_shape(obj, new_shape);
    }
    return true;
}
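/* Return the shape `obj` should take after being frozen: the current shape
 * if it is already frozen or too complex, the special-const shape when the
 * object is still on the root shape, and otherwise the current shape's
 * SHAPE_FROZEN child. */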
rb_shape_t *
rb_shape_transition_shape_frozen(VALUE obj)
{
    rb_shape_t* shape = rb_shape_get_shape(obj);
    RUBY_ASSERT(shape);
    RUBY_ASSERT(RB_OBJ_FROZEN(obj));

    if (rb_shape_frozen_shape_p(shape) || rb_shape_obj_too_complex(obj)) {
        return shape;
    }

    rb_shape_t* next_shape;

    if (shape == rb_shape_get_root_shape()) {
        return rb_shape_get_shape_by_id(SPECIAL_CONST_SHAPE_ID);
    }

    bool dont_care;
    next_shape = get_next_shape_internal(shape, (ID)id_frozen, SHAPE_FROZEN, &dont_care, true);

    RUBY_ASSERT(next_shape);
    return next_shape;
}

/*
 * This function is used for assertions where we don't want to increment
 * max_iv_count
 */
rb_shape_t *
rb_shape_get_next_iv_shape(rb_shape_t* shape, ID id)
{
    RUBY_ASSERT(!is_instance_id(id) || RTEST(rb_sym2str(ID2SYM(id))));
    bool dont_care;
    return get_next_shape_internal(shape, id, SHAPE_IVAR, &dont_care, true);
}
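/* Compute the shape reached by adding ivar `id` to `obj`'s current shape.
 * New variations are only allowed while the object's class is below
 * SHAPE_MAX_VARIATIONS; the class's max_iv_count and variation_count are
 * updated, and (optionally) a performance warning is emitted when the
 * variation limit is reached. */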
static inline rb_shape_t *
shape_get_next(rb_shape_t *shape, VALUE obj, ID id, bool emit_warnings)
{
    RUBY_ASSERT(!is_instance_id(id) || RTEST(rb_sym2str(ID2SYM(id))));
    if (UNLIKELY(shape->type == SHAPE_OBJ_TOO_COMPLEX)) {
        return shape;
    }

#if RUBY_DEBUG
    attr_index_t index;
    if (rb_shape_get_iv_index(shape, id, &index)) {
        rb_bug("rb_shape_get_next: trying to create ivar that already exists at index %u", index);
    }
#endif

    bool allow_new_shape = true;

    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        VALUE klass = rb_obj_class(obj);
        allow_new_shape = RCLASS_EXT(klass)->variation_count < SHAPE_MAX_VARIATIONS;
    }

    bool variation_created = false;
    rb_shape_t *new_shape = get_next_shape_internal(shape, id, SHAPE_IVAR, &variation_created, allow_new_shape);

    // Check if we should update max_iv_count on the object's class
    if (BUILTIN_TYPE(obj) == T_OBJECT) {
        VALUE klass = rb_obj_class(obj);
        if (new_shape->next_iv_index > RCLASS_EXT(klass)->max_iv_count) {
            RCLASS_EXT(klass)->max_iv_count = new_shape->next_iv_index;
        }

        if (variation_created) {
            RCLASS_EXT(klass)->variation_count++;
            if (emit_warnings && rb_warning_category_enabled_p(RB_WARN_CATEGORY_PERFORMANCE)) {
                if (RCLASS_EXT(klass)->variation_count >= SHAPE_MAX_VARIATIONS) {
                    rb_category_warn(
                        RB_WARN_CATEGORY_PERFORMANCE,
                        "The class %"PRIsVALUE" reached %d shape variations, instance variable accesses will be slower and memory usage increased.\n"
                        "It is recommended to define instance variables in a consistent order, for instance by eagerly defining them all in the #initialize method.",
                        rb_class_path(klass),
                        SHAPE_MAX_VARIATIONS
                    );
                }
            }
        }
    }

    return new_shape;
}

rb_shape_t *
rb_shape_get_next(rb_shape_t *shape, VALUE obj, ID id)
{
    return shape_get_next(shape, obj, id, true);
}

rb_shape_t *
rb_shape_get_next_no_warnings(rb_shape_t *shape, VALUE obj, ID id)
{
    return shape_get_next(shape, obj, id, false);
}

// Same as rb_shape_get_iv_index, but uses a provided valid shape id and index
// to return a result faster if branches of the shape tree are closely related.
bool
rb_shape_get_iv_index_with_hint(shape_id_t shape_id, ID id, attr_index_t *value, shape_id_t *shape_id_hint)
{
    attr_index_t index_hint = *value;
    rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
    rb_shape_t *initial_shape = shape;

    if (*shape_id_hint == INVALID_SHAPE_ID) {
        *shape_id_hint = shape_id;
        return rb_shape_get_iv_index(shape, id, value);
    }

    rb_shape_t * shape_hint = rb_shape_get_shape_by_id(*shape_id_hint);

    // We assume it's likely shape_id_hint and shape_id have a close common
    // ancestor, so we check up to ANCESTOR_SEARCH_MAX_DEPTH ancestors before
    // eventually using the index, as in case of a match it will be faster.
    // However if the shape doesn't have an index, we walk the entire tree.
    int depth = INT_MAX;
    if (shape->ancestor_index && shape->next_iv_index >= ANCESTOR_CACHE_THRESHOLD) {
        depth = ANCESTOR_SEARCH_MAX_DEPTH;
    }

    while (depth > 0 && shape->next_iv_index > index_hint) {
        while (shape_hint->next_iv_index > shape->next_iv_index) {
            shape_hint = rb_shape_get_parent(shape_hint);
        }

        if (shape_hint == shape) {
            // We've found a common ancestor so use the index hint
            *value = index_hint;
            *shape_id_hint = rb_shape_id(shape);
            return true;
        }
        if (shape->edge_name == id) {
            // We found the matching id before a common ancestor
            *value = shape->next_iv_index - 1;
            *shape_id_hint = rb_shape_id(shape);
            return true;
        }

        shape = rb_shape_get_parent(shape);
        depth--;
    }

    // If the original shape had an index but its ancestor doesn't
    // we switch back to the original one as it will be faster.
    if (!shape->ancestor_index && initial_shape->ancestor_index) {
        shape = initial_shape;
    }
    *shape_id_hint = shape_id;
    return rb_shape_get_iv_index(shape, id, value);
}

static bool
shape_get_iv_index(rb_shape_t *shape, ID id, attr_index_t *value)
{
    while (shape->parent_id != INVALID_SHAPE_ID) {
        if (shape->edge_name == id) {
            enum shape_type shape_type;
            shape_type = (enum shape_type)shape->type;

            switch (shape_type) {
              case SHAPE_IVAR:
                RUBY_ASSERT(shape->next_iv_index > 0);
                *value = shape->next_iv_index - 1;
                return true;
              case SHAPE_ROOT:
              case SHAPE_T_OBJECT:
                return false;
              case SHAPE_OBJ_TOO_COMPLEX:
              case SHAPE_FROZEN:
                rb_bug("Ivar should not exist on transition");
            }
        }

        shape = rb_shape_get_parent(shape);
    }

    return false;
}
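/* Try to answer an ivar-index query from the shape's ancestor cache.
 * Returns true and sets *value when the ivar is found there. Returns false
 * either because the shape has no cache (fewer than
 * ANCESTOR_CACHE_THRESHOLD ivars) or because the cache shows the ivar is
 * absent; the caller tells the two cases apart by checking ancestor_index
 * itself. */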
static bool
shape_cache_get_iv_index(rb_shape_t *shape, ID id, attr_index_t *value)
{
    if (shape->ancestor_index && shape->next_iv_index >= ANCESTOR_CACHE_THRESHOLD) {
        redblack_node_t *node = redblack_find(shape->ancestor_index, id);
        if (node) {
            rb_shape_t *shape = redblack_value(node);
            *value = shape->next_iv_index - 1;

#if RUBY_DEBUG
            attr_index_t shape_tree_index;
            RUBY_ASSERT(shape_get_iv_index(shape, id, &shape_tree_index));
            RUBY_ASSERT(shape_tree_index == *value);
#endif

            return true;
        }

        /* Verify the cache is correct by checking that this instance variable
         * does not exist in the shape tree either. */
        RUBY_ASSERT(!shape_get_iv_index(shape, id, value));
    }

    return false;
}

bool
rb_shape_get_iv_index(rb_shape_t *shape, ID id, attr_index_t *value)
{
    // It doesn't make sense to ask for the index of an IV that's stored
    // on an object that is "too complex" as it uses a hash for storing IVs
    RUBY_ASSERT(rb_shape_id(shape) != OBJ_TOO_COMPLEX_SHAPE_ID);

    if (!shape_cache_get_iv_index(shape, id, value)) {
        // If it wasn't in the ancestor cache, then don't do a linear search
        if (shape->ancestor_index && shape->next_iv_index >= ANCESTOR_CACHE_THRESHOLD) {
            return false;
        }
        else {
            return shape_get_iv_index(shape, id, value);
        }
    }

    return true;
}

void
rb_shape_set_shape(VALUE obj, rb_shape_t* shape)
{
    rb_shape_set_shape_id(obj, rb_shape_id(shape));
}

int32_t
rb_shape_id_offset(void)
{
    return sizeof(uintptr_t) - SHAPE_ID_NUM_BITS / sizeof(uintptr_t);
}
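/* Walk dest_shape's transition chain and follow the same edges starting
 * from initial_shape (a SHAPE_T_OBJECT shape), without creating any new
 * shapes. Returns NULL as soon as a matching edge is missing. */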
rb_shape_t *
rb_shape_traverse_from_new_root(rb_shape_t *initial_shape, rb_shape_t *dest_shape)
{
    RUBY_ASSERT(initial_shape->type == SHAPE_T_OBJECT);
    rb_shape_t *next_shape = initial_shape;

    if (dest_shape->type != initial_shape->type) {
        next_shape = rb_shape_traverse_from_new_root(initial_shape, rb_shape_get_parent(dest_shape));
        if (!next_shape) {
            return NULL;
        }
    }

    switch ((enum shape_type)dest_shape->type) {
      case SHAPE_IVAR:
      case SHAPE_FROZEN:
        if (!next_shape->edges) {
            return NULL;
        }

        VALUE lookup_result;
        if (SINGLE_CHILD_P(next_shape->edges)) {
            rb_shape_t * child = SINGLE_CHILD(next_shape->edges);
            if (child->edge_name == dest_shape->edge_name) {
                return child;
            }
            else {
                return NULL;
            }
        }
        else {
            if (rb_id_table_lookup(next_shape->edges, dest_shape->edge_name, &lookup_result)) {
                next_shape = (rb_shape_t *)lookup_result;
            }
            else {
                return NULL;
            }
        }
        break;
      case SHAPE_ROOT:
      case SHAPE_T_OBJECT:
        break;
      case SHAPE_OBJ_TOO_COMPLEX:
        rb_bug("Unreachable");
        break;
    }

    return next_shape;
}
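/* Replay dest_shape's SHAPE_IVAR transitions on top of initial_shape
 * (a SHAPE_T_OBJECT shape), creating edges that do not exist yet. The
 * too-complex shape is returned when a new transition cannot be made. */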
rb_shape_t *
rb_shape_rebuild_shape(rb_shape_t * initial_shape, rb_shape_t * dest_shape)
{
    RUBY_ASSERT(rb_shape_id(initial_shape) != OBJ_TOO_COMPLEX_SHAPE_ID);
    RUBY_ASSERT(rb_shape_id(dest_shape) != OBJ_TOO_COMPLEX_SHAPE_ID);

    rb_shape_t * midway_shape;

    RUBY_ASSERT(initial_shape->type == SHAPE_T_OBJECT);

    if (dest_shape->type != initial_shape->type) {
        midway_shape = rb_shape_rebuild_shape(initial_shape, rb_shape_get_parent(dest_shape));
        if (UNLIKELY(rb_shape_id(midway_shape) == OBJ_TOO_COMPLEX_SHAPE_ID)) {
            return midway_shape;
        }
    }
    else {
        midway_shape = initial_shape;
    }

    switch ((enum shape_type)dest_shape->type) {
      case SHAPE_IVAR:
        midway_shape = rb_shape_get_next_iv_shape(midway_shape, dest_shape->edge_name);
        break;
      case SHAPE_ROOT:
      case SHAPE_FROZEN:
      case SHAPE_T_OBJECT:
        break;
      case SHAPE_OBJ_TOO_COMPLEX:
        rb_bug("Unreachable");
        break;
    }

    return midway_shape;
}

RUBY_FUNC_EXPORTED bool
rb_shape_obj_too_complex(VALUE obj)
{
    return rb_shape_get_shape_id(obj) == OBJ_TOO_COMPLEX_SHAPE_ID;
}

size_t
rb_shape_edges_count(rb_shape_t *shape)
{
    if (shape->edges) {
        if (SINGLE_CHILD_P(shape->edges)) {
            return 1;
        }
        else {
            return rb_id_table_size(shape->edges);
        }
    }
    return 0;
}

size_t
rb_shape_memsize(rb_shape_t *shape)
{
    size_t memsize = sizeof(rb_shape_t);
    if (shape->edges && !SINGLE_CHILD_P(shape->edges)) {
        memsize += rb_id_table_memsize(shape->edges);
    }
    return memsize;
}

#if SHAPE_DEBUG
/*
 * Exposing Shape to Ruby via RubyVM.debug_shape
 */

/* :nodoc: */
static VALUE
rb_shape_too_complex(VALUE self)
{
    rb_shape_t * shape;
    shape = rb_shape_get_shape_by_id(NUM2INT(rb_struct_getmember(self, rb_intern("id"))));
    if (rb_shape_id(shape) == OBJ_TOO_COMPLEX_SHAPE_ID) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}

static VALUE
parse_key(ID key)
{
    if (is_instance_id(key)) {
        return ID2SYM(key);
    }
    return LONG2NUM(key);
}

static VALUE rb_shape_edge_name(rb_shape_t * shape);

static VALUE
rb_shape_t_to_rb_cShape(rb_shape_t *shape)
{
    VALUE rb_cShape = rb_const_get(rb_cRubyVM, rb_intern("Shape"));

    VALUE obj = rb_struct_new(rb_cShape,
            INT2NUM(rb_shape_id(shape)),
            INT2NUM(shape->parent_id),
            rb_shape_edge_name(shape),
            INT2NUM(shape->next_iv_index),
            INT2NUM(shape->heap_index),
            INT2NUM(shape->type),
            INT2NUM(shape->capacity));
    rb_obj_freeze(obj);
    return obj;
}

static enum rb_id_table_iterator_result
rb_edges_to_hash(ID key, VALUE value, void *ref)
{
    rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_shape_t_to_rb_cShape((rb_shape_t*)value));
    return ID_TABLE_CONTINUE;
}

/* :nodoc: */
static VALUE
rb_shape_edges(VALUE self)
{
    rb_shape_t* shape;

    shape = rb_shape_get_shape_by_id(NUM2INT(rb_struct_getmember(self, rb_intern("id"))));

    VALUE hash = rb_hash_new();

    if (shape->edges) {
        if (SINGLE_CHILD_P(shape->edges)) {
            rb_shape_t * child = SINGLE_CHILD(shape->edges);
            rb_edges_to_hash(child->edge_name, (VALUE)child, &hash);
        }
        else {
            rb_id_table_foreach(shape->edges, rb_edges_to_hash, &hash);
        }
    }

    return hash;
}

static VALUE
rb_shape_edge_name(rb_shape_t * shape)
{
    if (shape->edge_name) {
        if (is_instance_id(shape->edge_name)) {
            return ID2SYM(shape->edge_name);
        }
        return INT2NUM(shape->capacity);
    }
    return Qnil;
}

/* :nodoc: */
static VALUE
rb_shape_export_depth(VALUE self)
{
    rb_shape_t* shape;
    shape = rb_shape_get_shape_by_id(NUM2INT(rb_struct_getmember(self, rb_intern("id"))));
    return SIZET2NUM(rb_shape_depth(shape));
}

/* :nodoc: */
static VALUE
rb_shape_parent(VALUE self)
{
    rb_shape_t * shape;
    shape = rb_shape_get_shape_by_id(NUM2INT(rb_struct_getmember(self, rb_intern("id"))));
    if (shape->parent_id != INVALID_SHAPE_ID) {
        return rb_shape_t_to_rb_cShape(rb_shape_get_parent(shape));
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
rb_shape_debug_shape(VALUE self, VALUE obj)
{
    return rb_shape_t_to_rb_cShape(rb_shape_get_shape(obj));
}

/* :nodoc: */
static VALUE
rb_shape_root_shape(VALUE self)
{
    return rb_shape_t_to_rb_cShape(rb_shape_get_root_shape());
}

/* :nodoc: */
static VALUE
rb_shape_shapes_available(VALUE self)
{
    return INT2NUM(MAX_SHAPE_ID - (GET_SHAPE_TREE()->next_shape_id - 1));
}

/* :nodoc: */
static VALUE
rb_shape_exhaust(int argc, VALUE *argv, VALUE self)
{
    rb_check_arity(argc, 0, 1);
    int offset = argc == 1 ? NUM2INT(argv[0]) : 0;
    GET_SHAPE_TREE()->next_shape_id = MAX_SHAPE_ID - offset + 1;
    return Qnil;
}

VALUE rb_obj_shape(rb_shape_t* shape);

static enum rb_id_table_iterator_result collect_keys_and_values(ID key, VALUE value, void *ref)
{
    rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_obj_shape((rb_shape_t*)value));
    return ID_TABLE_CONTINUE;
}

static VALUE edges(struct rb_id_table* edges)
{
    VALUE hash = rb_hash_new();
    if (SINGLE_CHILD_P(edges)) {
        rb_shape_t * child = SINGLE_CHILD(edges);
        collect_keys_and_values(child->edge_name, (VALUE)child, &hash);
    }
    else {
        rb_id_table_foreach(edges, collect_keys_and_values, &hash);
    }
    return hash;
}

/* :nodoc: */
VALUE
rb_obj_shape(rb_shape_t* shape)
{
    VALUE rb_shape = rb_hash_new();

    rb_hash_aset(rb_shape, ID2SYM(rb_intern("id")), INT2NUM(rb_shape_id(shape)));
    rb_hash_aset(rb_shape, ID2SYM(rb_intern("edges")), edges(shape->edges));

    if (shape == rb_shape_get_root_shape()) {
        rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(ROOT_SHAPE_ID));
    }
    else {
        rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(shape->parent_id));
    }

    rb_hash_aset(rb_shape, ID2SYM(rb_intern("edge_name")), rb_id2str(shape->edge_name));
    return rb_shape;
}

/* :nodoc: */
static VALUE
shape_transition_tree(VALUE self)
{
    return rb_obj_shape(rb_shape_get_root_shape());
}

/* :nodoc: */
static VALUE
rb_shape_find_by_id(VALUE mod, VALUE id)
{
    shape_id_t shape_id = NUM2UINT(id);
    if (shape_id >= GET_SHAPE_TREE()->next_shape_id) {
        rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id);
    }
    return rb_shape_t_to_rb_cShape(rb_shape_get_shape_by_id(shape_id));
}
#endif

#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
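/* Set up the global shape tree: mmap (or xcalloc) the fixed shape list and
 * the red-black node cache, then create the built-in shapes (root,
 * special-const/frozen, and "too complex") whose IDs are fixed constants. */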
void
Init_default_shapes(void)
{
    rb_shape_tree_ptr = xcalloc(1, sizeof(rb_shape_tree_t));

#ifdef HAVE_MMAP
    size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError);
    rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
                                                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (GET_SHAPE_TREE()->shape_list == MAP_FAILED) {
        GET_SHAPE_TREE()->shape_list = 0;
    }
    else {
        ruby_annotate_mmap(rb_shape_tree_ptr->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
    }
#else
    GET_SHAPE_TREE()->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
#endif

    if (!GET_SHAPE_TREE()->shape_list) {
        rb_memerror();
    }

    id_frozen = rb_make_internal_id();
    id_t_object = rb_make_internal_id();

#ifdef HAVE_MMAP
    size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError);
    rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
                                                             PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    rb_shape_tree_ptr->cache_size = 0;

    // If mmap fails, then give up on the redblack tree cache.
    // We set the cache size such that the redblack node allocators think
    // the cache is full.
    if (GET_SHAPE_TREE()->shape_cache == MAP_FAILED) {
        GET_SHAPE_TREE()->shape_cache = 0;
        GET_SHAPE_TREE()->cache_size = REDBLACK_CACHE_SIZE;
    }
    else {
        ruby_annotate_mmap(rb_shape_tree_ptr->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
    }
#endif

    // Root shape
    rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
    root->capacity = 0;
    root->type = SHAPE_ROOT;
|
|
|
root->heap_index = 0;
|
2023-02-17 16:32:51 +03:00
|
|
|
GET_SHAPE_TREE()->root_shape = root;
|
|
|
|
RUBY_ASSERT(rb_shape_id(GET_SHAPE_TREE()->root_shape) == ROOT_SHAPE_ID);
|
2022-11-08 23:35:31 +03:00
|
|
|
|
2022-12-09 00:48:48 +03:00
|
|
|
bool dont_care;
|
2022-11-08 23:35:31 +03:00
|
|
|
// Special const shape
|
|
|
|
#if RUBY_DEBUG
|
2024-03-12 21:27:34 +03:00
|
|
|
rb_shape_t *special_const_shape =
|
2022-11-08 23:35:31 +03:00
|
|
|
#endif
|
2023-10-24 22:05:05 +03:00
|
|
|
get_next_shape_internal(root, (ID)id_frozen, SHAPE_FROZEN, &dont_care, true);
|
2022-11-08 23:35:31 +03:00
|
|
|
RUBY_ASSERT(rb_shape_id(special_const_shape) == SPECIAL_CONST_SHAPE_ID);
|
2023-02-17 16:32:51 +03:00
|
|
|
RUBY_ASSERT(SPECIAL_CONST_SHAPE_ID == (GET_SHAPE_TREE()->next_shape_id - 1));
|
2022-11-08 23:35:31 +03:00
|
|
|
RUBY_ASSERT(rb_shape_frozen_shape_p(special_const_shape));
|
2022-12-09 01:16:52 +03:00
|
|
|
|
2024-03-12 21:27:34 +03:00
|
|
|
rb_shape_t *too_complex_shape = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID);
|
|
|
|
too_complex_shape->type = SHAPE_OBJ_TOO_COMPLEX;
|
2024-10-03 15:53:49 +03:00
|
|
|
too_complex_shape->heap_index = 0;
|
2023-02-17 16:32:51 +03:00
|
|
|
RUBY_ASSERT(OBJ_TOO_COMPLEX_SHAPE_ID == (GET_SHAPE_TREE()->next_shape_id - 1));
|
2024-03-12 21:27:34 +03:00
|
|
|
RUBY_ASSERT(rb_shape_id(too_complex_shape) == OBJ_TOO_COMPLEX_SHAPE_ID);
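    /*
     * Added note (commentary, not in the original source): the bootstrap
     * shapes above are created in a fixed order -- root, the frozen
     * transition used for special constants, then the too-complex shape --
     * so that the assertions against ROOT_SHAPE_ID, SPECIAL_CONST_SHAPE_ID
     * and OBJ_TOO_COMPLEX_SHAPE_ID hold; the T_OBJECT shapes allocated next
     * start at FIRST_T_OBJECT_SHAPE_ID.
     */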
|
|
|
|
|
|
|
|
// Make shapes for T_OBJECT
|
2024-10-03 15:53:49 +03:00
|
|
|
size_t *sizes = rb_gc_heap_sizes();
|
2024-03-12 21:27:34 +03:00
|
|
|
for (int i = 0; sizes[i] > 0; i++) {
|
|
|
|
rb_shape_t *t_object_shape = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
|
|
|
|
t_object_shape->type = SHAPE_T_OBJECT;
|
2024-10-03 15:53:49 +03:00
|
|
|
t_object_shape->heap_index = i;
|
2024-03-12 21:27:34 +03:00
|
|
|
t_object_shape->capacity = (uint32_t)((sizes[i] - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
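        /* Added worked example (assumes a typical 64-bit build where the
         * RObject header is 16 bytes and sizeof(VALUE) == 8): a hypothetical
         * 40-byte heap slot gives (40 - 16) / 8 == 3 embedded ivar slots,
         * and larger heaps scale the capacity up accordingly. */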
|
|
|
|
t_object_shape->edges = rb_id_table_create(0);
|
|
|
|
t_object_shape->ancestor_index = LEAF;
|
|
|
|
RUBY_ASSERT(rb_shape_id(t_object_shape) == (shape_id_t)(i + FIRST_T_OBJECT_SHAPE_ID));
|
|
|
|
}
|
2022-11-08 23:35:31 +03:00
|
|
|
}
|
|
|
|
|
2022-10-03 18:14:32 +03:00
|
|
|
void
|
|
|
|
Init_shape(void)
|
|
|
|
{
|
2022-11-24 01:01:03 +03:00
|
|
|
#if SHAPE_DEBUG
|
2022-12-16 00:38:53 +03:00
|
|
|
VALUE rb_cShape = rb_struct_define_under(rb_cRubyVM, "Shape",
|
|
|
|
"id",
|
|
|
|
"parent_id",
|
|
|
|
"edge_name",
|
|
|
|
"next_iv_index",
|
2024-10-03 15:53:49 +03:00
|
|
|
"heap_index",
|
2022-12-16 00:38:53 +03:00
|
|
|
"type",
|
|
|
|
"capacity",
|
|
|
|
NULL);
|
2022-10-03 18:14:32 +03:00
|
|
|
|
|
|
|
rb_define_method(rb_cShape, "parent", rb_shape_parent, 0);
|
|
|
|
rb_define_method(rb_cShape, "edges", rb_shape_edges, 0);
|
|
|
|
rb_define_method(rb_cShape, "depth", rb_shape_export_depth, 0);
|
2022-12-09 01:16:52 +03:00
|
|
|
rb_define_method(rb_cShape, "too_complex?", rb_shape_too_complex, 0);
|
2022-10-03 18:14:32 +03:00
|
|
|
rb_define_const(rb_cShape, "SHAPE_ROOT", INT2NUM(SHAPE_ROOT));
|
|
|
|
rb_define_const(rb_cShape, "SHAPE_IVAR", INT2NUM(SHAPE_IVAR));
|
2022-11-18 02:57:11 +03:00
|
|
|
rb_define_const(rb_cShape, "SHAPE_T_OBJECT", INT2NUM(SHAPE_T_OBJECT));
|
2022-10-03 18:14:32 +03:00
|
|
|
rb_define_const(rb_cShape, "SHAPE_FROZEN", INT2NUM(SHAPE_FROZEN));
|
2022-11-18 21:29:41 +03:00
|
|
|
rb_define_const(rb_cShape, "SHAPE_ID_NUM_BITS", INT2NUM(SHAPE_ID_NUM_BITS));
|
2022-10-03 20:52:40 +03:00
|
|
|
rb_define_const(rb_cShape, "SHAPE_FLAG_SHIFT", INT2NUM(SHAPE_FLAG_SHIFT));
|
2022-11-08 23:35:31 +03:00
|
|
|
rb_define_const(rb_cShape, "SPECIAL_CONST_SHAPE_ID", INT2NUM(SPECIAL_CONST_SHAPE_ID));
|
2022-12-09 01:16:52 +03:00
|
|
|
rb_define_const(rb_cShape, "OBJ_TOO_COMPLEX_SHAPE_ID", INT2NUM(OBJ_TOO_COMPLEX_SHAPE_ID));
|
2024-03-12 21:34:17 +03:00
|
|
|
rb_define_const(rb_cShape, "FIRST_T_OBJECT_SHAPE_ID", INT2NUM(FIRST_T_OBJECT_SHAPE_ID));
|
2022-12-09 01:16:52 +03:00
|
|
|
rb_define_const(rb_cShape, "SHAPE_MAX_VARIATIONS", INT2NUM(SHAPE_MAX_VARIATIONS));
|
2023-10-26 23:28:25 +03:00
|
|
|
rb_define_const(rb_cShape, "SIZEOF_RB_SHAPE_T", INT2NUM(sizeof(rb_shape_t)));
|
|
|
|
rb_define_const(rb_cShape, "SIZEOF_REDBLACK_NODE_T", INT2NUM(sizeof(redblack_node_t)));
|
|
|
|
rb_define_const(rb_cShape, "SHAPE_BUFFER_SIZE", INT2NUM(sizeof(rb_shape_t) * SHAPE_BUFFER_SIZE));
|
|
|
|
rb_define_const(rb_cShape, "REDBLACK_CACHE_SIZE", INT2NUM(sizeof(redblack_node_t) * REDBLACK_CACHE_SIZE));
|
2022-10-03 20:52:40 +03:00
|
|
|
|
|
|
|
rb_define_singleton_method(rb_cShape, "transition_tree", shape_transition_tree, 0);
|
|
|
|
rb_define_singleton_method(rb_cShape, "find_by_id", rb_shape_find_by_id, 1);
|
|
|
|
rb_define_singleton_method(rb_cShape, "of", rb_shape_debug_shape, 1);
|
|
|
|
rb_define_singleton_method(rb_cShape, "root_shape", rb_shape_root_shape, 0);
|
2023-10-19 21:00:54 +03:00
|
|
|
rb_define_singleton_method(rb_cShape, "shapes_available", rb_shape_shapes_available, 0);
|
2023-11-21 19:23:56 +03:00
|
|
|
rb_define_singleton_method(rb_cShape, "exhaust_shapes", rb_shape_exhaust, -1);
|
2022-10-13 00:37:02 +03:00
|
|
|
#endif
|
2022-10-03 18:14:32 +03:00
|
|
|
}
|