/**********************************************************************
internal.h -
$Author$
created at: Tue May 17 11:42:20 JST 2011
Copyright (C) 2011 Yukihiro Matsumoto
**********************************************************************/
#ifndef RUBY_INTERNAL_H
#define RUBY_INTERNAL_H 1
#include "ruby.h"
#if defined(__cplusplus)
extern "C" {
#if 0
} /* satisfy cc-mode */
#endif
#endif
#ifdef HAVE_STDBOOL_H
# include <stdbool.h>
#else
# include "missing/stdbool.h"
#endif
/* The most significant bit of the lower part of a half-long integer.
* If sizeof(long) == 4, this is 0x8000.
* If sizeof(long) == 8, this is 0x80000000.
*/
#define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))
#define LIKELY(x) RB_LIKELY(x)
#define UNLIKELY(x) RB_UNLIKELY(x)
#ifndef MAYBE_UNUSED
# define MAYBE_UNUSED(x) x
#endif
#ifndef WARN_UNUSED_RESULT
# define WARN_UNUSED_RESULT(x) x
#endif
#ifndef __has_feature
# define __has_feature(x) 0
#endif
#ifndef __has_extension
# define __has_extension __has_feature
#endif
#if 0
#elif defined(NO_SANITIZE) && __has_feature(memory_sanitizer)
# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x) \
NO_SANITIZE("memory", NO_SANITIZE("address", NOINLINE(x)))
#elif defined(NO_SANITIZE)
# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x) \
NO_SANITIZE("address", NOINLINE(x))
#elif defined(NO_SANITIZE_ADDRESS)
# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x) \
NO_SANITIZE_ADDRESS(NOINLINE(x))
#elif defined(NO_ADDRESS_SAFETY_ANALYSIS)
# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x) \
NO_ADDRESS_SAFETY_ANALYSIS(NOINLINE(x))
#else
# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x) x
#endif
#if defined(NO_SANITIZE) && defined(__GNUC__) && !defined(__clang__)
/* GCC warns about unknown sanitizer, which is annoying. */
#undef NO_SANITIZE
#define NO_SANITIZE(x, y) \
COMPILER_WARNING_PUSH; \
COMPILER_WARNING_IGNORED(-Wattributes); \
__attribute__((__no_sanitize__(x))) y; \
COMPILER_WARNING_POP
#endif
#ifndef NO_SANITIZE
# define NO_SANITIZE(x, y) y
#endif
#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
# define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) 0
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0
#endif
#define numberof(array) ((int)(sizeof(array) / sizeof((array)[0])))
#ifndef MJIT_HEADER
#ifdef HAVE_SANITIZER_ASAN_INTERFACE_H
# include <sanitizer/asan_interface.h>
#endif
#if !__has_feature(address_sanitizer)
# define __asan_poison_memory_region(x, y)
# define __asan_unpoison_memory_region(x, y)
# define __asan_region_is_poisoned(x, y) 0
#endif
#ifdef HAVE_SANITIZER_MSAN_INTERFACE_H
# if __has_feature(memory_sanitizer)
# include <sanitizer/msan_interface.h>
# endif
#endif
#if !__has_feature(memory_sanitizer)
# define __msan_allocated_memory(x, y) ((void)(x), (void)(y))
# define __msan_poison(x, y) ((void)(x), (void)(y))
# define __msan_unpoison(x, y) ((void)(x), (void)(y))
# define __msan_unpoison_string(x) ((void)(x))
#endif
/*!
* This function asserts that the (contiguous) memory region of `size` bytes
* starting at `ptr` is "poisoned". Both read and write access to such a
* memory region are prohibited until it is properly unpoisoned. The region
* must be previously allocated (do not pass a freed pointer here), but it
* need not be an entire object returned by malloc; you can punch a hole in
* part of a gigantic heap arena. This is handy when you keep an allocated
* memory region around for later reuse instead of freeing it: poison it
* while it stays unused, and unpoison it when you reuse it.
*
* \param[in] ptr pointer to the beginning of the memory region to poison.
* \param[in] size the length of the memory region to poison.
*/
static inline void
asan_poison_memory_region(const volatile void *ptr, size_t size)
{
__msan_poison(ptr, size);
__asan_poison_memory_region(ptr, size);
}
/*!
* This is a variant of asan_poison_memory_region that takes a VALUE.
*
* \param[in] obj target object.
*/
static inline void
asan_poison_object(VALUE obj)
{
MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
asan_poison_memory_region(ptr, SIZEOF_VALUE);
}
#if !__has_feature(address_sanitizer)
#define asan_poison_object_if(ptr, obj) ((void)(ptr), (void)(obj))
#else
#define asan_poison_object_if(ptr, obj) do { \
if (ptr) asan_poison_object(obj); \
} while (0)
#endif
/*!
* This function tests whether the given object is fully addressable.
*
* \param[in] obj target object.
* \retval 0 the given object is fully addressable.
* \retval otherwise pointer to the first poisoned byte.
*/
static inline void *
asan_poisoned_object_p(VALUE obj)
{
MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
return __asan_region_is_poisoned(ptr, SIZEOF_VALUE);
}
/*!
* This function asserts that the (formerly poisoned) memory region of `size`
* bytes starting at `ptr` is now addressable. Write access to such a memory
* region becomes allowed. However, read access may or may not be possible,
* because the region can still hold the contents of previous usage. That
* information is passed via the malloc_p flag. If it is true, the contents
* of the region are _not_ fully defined (just like the return value of
* malloc); do not read from it before writing something first. If malloc_p
* is false, on the other hand, the memory region is fully defined and can be
* read immediately.
*
* \param[in] ptr pointer to the beginning of the memory region to unpoison.
* \param[in] size the length of the memory region.
* \param[in] malloc_p whether the memory region is like a fresh malloc result or not.
*/
static inline void
asan_unpoison_memory_region(const volatile void *ptr, size_t size, bool malloc_p)
{
__asan_unpoison_memory_region(ptr, size);
if (malloc_p) {
__msan_allocated_memory(ptr, size);
}
else {
__msan_unpoison(ptr, size);
}
}
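/* Illustrative usage sketch (not from the original header): how a caller
 * might pair the two helpers above when recycling an allocation instead of
 * freeing it. The pool_* names are hypothetical. */
#if 0
static void
pool_release(void *slot, size_t slot_size)
{
    /* keep the slot allocated, but trap any stray access while it sits idle */
    asan_poison_memory_region(slot, slot_size);
}

static void *
pool_acquire(void *slot, size_t slot_size)
{
    /* contents are stale, so treat the slot like a fresh malloc result */
    asan_unpoison_memory_region(slot, slot_size, true);
    return slot;
}
#endif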
/*!
* This is a variant of asan_unpoison_memory_region that takes a VALUE.
*
* \param[in] obj target object.
* \param[in] newobj_p whether the memory region is like a fresh malloc result or not.
*/
static inline void
asan_unpoison_object(VALUE obj, bool newobj_p)
{
MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
asan_unpoison_memory_region(ptr, SIZEOF_VALUE, newobj_p);
}
#endif
/* Prevent compiler from reordering access */
#define ACCESS_ONCE(type,x) (*((volatile type *)&(x)))
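/* Usage sketch (illustrative, not from the original header): force a real
 * load of a flag another thread may update, instead of letting the compiler
 * hoist or cache it. The flag and function names are hypothetical. */
#if 0
extern int interrupt_flag;  /* hypothetical flag set by another thread */
static void
example_wait_for_interrupt(void)
{
    while (!ACCESS_ONCE(int, interrupt_flag)) {
        /* spin: each iteration performs a fresh load of the flag */
    }
}
#endif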
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
# define STATIC_ASSERT(name, expr) _Static_assert(expr, #name ": " #expr)
#elif GCC_VERSION_SINCE(4, 6, 0) || __has_extension(c_static_assert)
# define STATIC_ASSERT(name, expr) RB_GNUC_EXTENSION _Static_assert(expr, #name ": " #expr)
#else
# define STATIC_ASSERT(name, expr) typedef int static_assert_##name##_check[1 - 2*!(expr)]
#endif
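/* Usage sketch (illustrative, not from the original header): a compile-time
 * check in the style this macro is used with; the name and condition here
 * are just an example. */
#if 0
STATIC_ASSERT(static_assert_demo, sizeof(VALUE) >= sizeof(void *));
#endif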
#define SIGNED_INTEGER_TYPE_P(int_type) (0 > ((int_type)0)-1)
#define SIGNED_INTEGER_MAX(sint_type) \
(sint_type) \
((((sint_type)1) << (sizeof(sint_type) * CHAR_BIT - 2)) | \
((((sint_type)1) << (sizeof(sint_type) * CHAR_BIT - 2)) - 1))
#define SIGNED_INTEGER_MIN(sint_type) (-SIGNED_INTEGER_MAX(sint_type)-1)
#define UNSIGNED_INTEGER_MAX(uint_type) (~(uint_type)0)
#if SIGNEDNESS_OF_TIME_T < 0 /* signed */
# define TIMET_MAX SIGNED_INTEGER_MAX(time_t)
# define TIMET_MIN SIGNED_INTEGER_MIN(time_t)
#elif SIGNEDNESS_OF_TIME_T > 0 /* unsigned */
# define TIMET_MAX UNSIGNED_INTEGER_MAX(time_t)
# define TIMET_MIN ((time_t)0)
#endif
#define TIMET_MAX_PLUS_ONE (2*(double)(TIMET_MAX/2+1))
#ifdef HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW_P
#define MUL_OVERFLOW_P(a, b) \
__builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
#elif defined HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW
#define MUL_OVERFLOW_P(a, b) \
RB_GNUC_EXTENSION_BLOCK(__typeof__(a) c; __builtin_mul_overflow((a), (b), &c))
#endif
#define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
(a) == 0 ? 0 : \
(a) == -1 ? (b) < -(max) : \
(a) > 0 ? \
((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))
#ifdef HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW_P
/* __builtin_mul_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
#define MUL_OVERFLOW_FIXNUM_P(a, b) RB_GNUC_EXTENSION_BLOCK( \
struct { long fixnum : SIZEOF_LONG * CHAR_BIT - 1; } c; \
__builtin_mul_overflow_p((a), (b), c.fixnum); \
)
#else
#define MUL_OVERFLOW_FIXNUM_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif
#ifdef MUL_OVERFLOW_P
#define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
#define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
#define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_P(a, b)
#else
#define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
#define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#ifndef swap16
# ifdef HAVE_BUILTIN___BUILTIN_BSWAP16
# define swap16(x) __builtin_bswap16(x)
# endif
#endif
#ifndef swap16
# define swap16(x) ((uint16_t)((((x)&0xFF)<<8) | (((x)>>8)&0xFF)))
#endif
#ifndef swap32
# ifdef HAVE_BUILTIN___BUILTIN_BSWAP32
# define swap32(x) __builtin_bswap32(x)
# endif
#endif
#ifndef swap32
# define swap32(x) ((uint32_t)((((x)&0xFF)<<24) \
|(((x)>>24)&0xFF) \
|(((x)&0x0000FF00)<<8) \
|(((x)&0x00FF0000)>>8) ))
#endif
#ifndef swap64
# ifdef HAVE_BUILTIN___BUILTIN_BSWAP64
# define swap64(x) __builtin_bswap64(x)
# endif
#endif
#ifndef swap64
# ifdef HAVE_INT64_T
# define byte_in_64bit(n) ((uint64_t)0xff << (n))
# define swap64(x) ((uint64_t)((((x)&byte_in_64bit(0))<<56) \
|(((x)>>56)&0xFF) \
|(((x)&byte_in_64bit(8))<<40) \
|(((x)&byte_in_64bit(48))>>40) \
|(((x)&byte_in_64bit(16))<<24) \
|(((x)&byte_in_64bit(40))>>24) \
|(((x)&byte_in_64bit(24))<<8) \
|(((x)&byte_in_64bit(32))>>8)))
# endif
#endif
static inline unsigned int
nlz_int(unsigned int x)
{
#if defined(HAVE_BUILTIN___BUILTIN_CLZ)
if (x == 0) return SIZEOF_INT * CHAR_BIT;
return (unsigned int)__builtin_clz(x);
#else
unsigned int y;
# if 64 < SIZEOF_INT * CHAR_BIT
unsigned int n = 128;
# elif 32 < SIZEOF_INT * CHAR_BIT
unsigned int n = 64;
# else
unsigned int n = 32;
# endif
# if 64 < SIZEOF_INT * CHAR_BIT
y = x >> 64; if (y) {n -= 64; x = y;}
# endif
# if 32 < SIZEOF_INT * CHAR_BIT
y = x >> 32; if (y) {n -= 32; x = y;}
# endif
y = x >> 16; if (y) {n -= 16; x = y;}
y = x >> 8; if (y) {n -= 8; x = y;}
y = x >> 4; if (y) {n -= 4; x = y;}
y = x >> 2; if (y) {n -= 2; x = y;}
y = x >> 1; if (y) {return n - 2;}
return (unsigned int)(n - x);
#endif
}
static inline unsigned int
nlz_long(unsigned long x)
{
#if defined(HAVE_BUILTIN___BUILTIN_CLZL)
if (x == 0) return SIZEOF_LONG * CHAR_BIT;
return (unsigned int)__builtin_clzl(x);
#else
unsigned long y;
# if 64 < SIZEOF_LONG * CHAR_BIT
unsigned int n = 128;
# elif 32 < SIZEOF_LONG * CHAR_BIT
unsigned int n = 64;
# else
unsigned int n = 32;
# endif
# if 64 < SIZEOF_LONG * CHAR_BIT
y = x >> 64; if (y) {n -= 64; x = y;}
# endif
# if 32 < SIZEOF_LONG * CHAR_BIT
y = x >> 32; if (y) {n -= 32; x = y;}
# endif
y = x >> 16; if (y) {n -= 16; x = y;}
y = x >> 8; if (y) {n -= 8; x = y;}
y = x >> 4; if (y) {n -= 4; x = y;}
y = x >> 2; if (y) {n -= 2; x = y;}
y = x >> 1; if (y) {return n - 2;}
return (unsigned int)(n - x);
#endif
}
#ifdef HAVE_LONG_LONG
static inline unsigned int
nlz_long_long(unsigned LONG_LONG x)
{
#if defined(HAVE_BUILTIN___BUILTIN_CLZLL)
if (x == 0) return SIZEOF_LONG_LONG * CHAR_BIT;
return (unsigned int)__builtin_clzll(x);
#else
unsigned LONG_LONG y;
# if 64 < SIZEOF_LONG_LONG * CHAR_BIT
unsigned int n = 128;
# elif 32 < SIZEOF_LONG_LONG * CHAR_BIT
unsigned int n = 64;
# else
unsigned int n = 32;
# endif
# if 64 < SIZEOF_LONG_LONG * CHAR_BIT
y = x >> 64; if (y) {n -= 64; x = y;}
# endif
# if 32 < SIZEOF_LONG_LONG * CHAR_BIT
y = x >> 32; if (y) {n -= 32; x = y;}
# endif
y = x >> 16; if (y) {n -= 16; x = y;}
y = x >> 8; if (y) {n -= 8; x = y;}
y = x >> 4; if (y) {n -= 4; x = y;}
y = x >> 2; if (y) {n -= 2; x = y;}
y = x >> 1; if (y) {return n - 2;}
return (unsigned int)(n - x);
#endif
}
#endif
#ifdef HAVE_UINT128_T
static inline unsigned int
nlz_int128(uint128_t x)
{
uint128_t y;
unsigned int n = 128;
y = x >> 64; if (y) {n -= 64; x = y;}
y = x >> 32; if (y) {n -= 32; x = y;}
y = x >> 16; if (y) {n -= 16; x = y;}
y = x >> 8; if (y) {n -= 8; x = y;}
y = x >> 4; if (y) {n -= 4; x = y;}
y = x >> 2; if (y) {n -= 2; x = y;}
y = x >> 1; if (y) {return n - 2;}
return (unsigned int)(n - x);
}
#endif
static inline unsigned int
nlz_intptr(uintptr_t x)
{
#if SIZEOF_UINTPTR_T == SIZEOF_INT
return nlz_int(x);
#elif SIZEOF_UINTPTR_T == SIZEOF_LONG
return nlz_long(x);
#elif SIZEOF_UINTPTR_T == SIZEOF_LONG_LONG
return nlz_long_long(x);
#else
#error no known integer type corresponds uintptr_t
return /* sane compiler */ ~0;
#endif
}
static inline unsigned int
rb_popcount32(uint32_t x)
{
#ifdef HAVE_BUILTIN___BUILTIN_POPCOUNT
return (unsigned int)__builtin_popcount(x);
#else
x = (x & 0x55555555) + (x >> 1 & 0x55555555);
x = (x & 0x33333333) + (x >> 2 & 0x33333333);
x = (x & 0x0f0f0f0f) + (x >> 4 & 0x0f0f0f0f);
x = (x & 0x001f001f) + (x >> 8 & 0x001f001f);
return (x & 0x0000003f) + (x >>16 & 0x0000003f);
#endif
}
static inline int
rb_popcount64(uint64_t x)
{
#ifdef HAVE_BUILTIN___BUILTIN_POPCOUNT
return __builtin_popcountll(x);
#else
x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555);
x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333);
x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707);
x = (x & 0x001f001f001f001f) + (x >> 8 & 0x001f001f001f001f);
x = (x & 0x0000003f0000003f) + (x >>16 & 0x0000003f0000003f);
return (x & 0x7f) + (x >>32 & 0x7f);
#endif
}
static inline int
rb_popcount_intptr(uintptr_t x)
{
#if SIZEOF_VOIDP == 8
return rb_popcount64(x);
#elif SIZEOF_VOIDP == 4
return rb_popcount32(x);
#endif
}
static inline int
ntz_int32(uint32_t x)
{
#ifdef HAVE_BUILTIN___BUILTIN_CTZ
return __builtin_ctz(x);
#else
return rb_popcount32((~x) & (x-1));
#endif
}
static inline int
ntz_int64(uint64_t x)
{
#ifdef HAVE_BUILTIN___BUILTIN_CTZLL
return __builtin_ctzll(x);
#else
return rb_popcount64((~x) & (x-1));
#endif
}
static inline int
ntz_intptr(uintptr_t x)
{
#if SIZEOF_VOIDP == 8
return ntz_int64(x);
#elif SIZEOF_VOIDP == 4
return ntz_int32(x);
#endif
}
#if HAVE_LONG_LONG && SIZEOF_LONG * 2 <= SIZEOF_LONG_LONG
# define DLONG LONG_LONG
# define DL2NUM(x) LL2NUM(x)
#elif defined(HAVE_INT128_T)
# define DLONG int128_t
# define DL2NUM(x) (RB_FIXABLE(x) ? LONG2FIX(x) : rb_int128t2big(x))
VALUE rb_int128t2big(int128_t n);
#endif
static inline long
rb_overflowed_fix_to_int(long x)
{
return (long)((unsigned long)(x >> 1) ^ (1LU << (SIZEOF_LONG * CHAR_BIT - 1)));
}
static inline VALUE
rb_fix_plus_fix(VALUE x, VALUE y)
{
#ifdef HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW
long lz;
/* NOTE
* (1) `LONG2FIX(FIX2LONG(x)+FIX2LONG(y))`
*     = `((lx*2+1)/2 + (ly*2+1)/2)*2+1`
*     = `lx*2 + ly*2 + 1`
*     = `(lx*2+1) + (ly*2+1) - 1`
*     = `x + y - 1`
* (2) Fixnum's LSB is always 1.
*     It means you can always run `x - 1` without overflow.
* (3) Of course `z = x + (y-1)` may overflow.
*     At that time the true value is
*     * positive: 0b0 1xxx...1, and z = 0b1xxx...1
*     * negative: 0b1 0xxx...1, and z = 0b0xxx...1
*     To convert this true value to long,
*     (a) Use arithmetic shift
*         * positive: 0b11xxx...
*         * negative: 0b00xxx...
*     (b) invert MSB
*         * positive: 0b01xxx...
*         * negative: 0b10xxx...
*/
if (__builtin_add_overflow((long)x, (long)y-1, &lz)) {
return rb_int2big(rb_overflowed_fix_to_int(lz));
}
else {
return (VALUE)lz;
}
#else
long lz = FIX2LONG(x) + FIX2LONG(y);
return LONG2NUM(lz);
#endif
}
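/* Worked example (illustrative, editorial): for lx = 3, ly = 4 the tagged
 * values are x = LONG2FIX(3) = 2*3+1 = 7 and y = LONG2FIX(4) = 2*4+1 = 9, so
 * x + (y-1) = 7 + 8 = 15 = 2*7+1 = LONG2FIX(7), i.e. the tagged sum of 3+4.
 * A single __builtin_add_overflow(x, y-1, &lz) therefore yields both the
 * result and the overflow flag. */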
static inline VALUE
rb_fix_minus_fix(VALUE x, VALUE y)
{
#ifdef HAVE_BUILTIN___BUILTIN_SUB_OVERFLOW
long lz;
if (__builtin_sub_overflow((long)x, (long)y-1, &lz)) {
return rb_int2big(rb_overflowed_fix_to_int(lz));
}
else {
return (VALUE)lz;
}
#else
long lz = FIX2LONG(x) - FIX2LONG(y);
return LONG2NUM(lz);
#endif
}
/* arguments must be Fixnum */
static inline VALUE
rb_fix_mul_fix(VALUE x, VALUE y)
{
long lx = FIX2LONG(x);
long ly = FIX2LONG(y);
#ifdef DLONG
return DL2NUM((DLONG)lx * (DLONG)ly);
#else
if (MUL_OVERFLOW_FIXNUM_P(lx, ly)) {
return rb_big_mul(rb_int2big(lx), rb_int2big(ly));
}
else {
return LONG2FIX(lx * ly);
}
#endif
}
/*
* This behaves differently from C99 for negative arguments.
* Note that div may overflow fixnum.
*/
static inline void
rb_fix_divmod_fix(VALUE a, VALUE b, VALUE *divp, VALUE *modp)
{
/* assume / and % comply with C99.
* ldiv(3) won't be inlined by GCC and clang.
* I expect / and % to be compiled into a single idiv.
*/
long x = FIX2LONG(a);
long y = FIX2LONG(b);
long div, mod;
if (x == FIXNUM_MIN && y == -1) {
if (divp) *divp = LONG2NUM(-FIXNUM_MIN);
if (modp) *modp = LONG2FIX(0);
return;
}
div = x / y;
mod = x % y;
if (y > 0 ? mod < 0 : mod > 0) {
mod += y;
div -= 1;
}
if (divp) *divp = LONG2FIX(div);
if (modp) *modp = LONG2FIX(mod);
}
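/* Example (illustrative, editorial): Ruby's divmod is floored, C99's is
 * truncated. For x = -7, y = 3, C99 gives div = -2, mod = -1; the adjustment
 * above converts that to div = -3, mod = 2, so div * y + mod == x still
 * holds and mod takes the sign of y, matching Integer#divmod. */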
/* div() for Ruby
* This behaves differently from C99 for negative arguments.
*/
static inline VALUE
rb_fix_div_fix(VALUE x, VALUE y)
{
VALUE div;
rb_fix_divmod_fix(x, y, &div, NULL);
return div;
}
/* mod() for Ruby
* This behaves differently from C99 for negative arguments.
*/
static inline VALUE
rb_fix_mod_fix(VALUE x, VALUE y)
{
VALUE mod;
rb_fix_divmod_fix(x, y, NULL, &mod);
return mod;
}
#if defined(HAVE_UINT128_T) && defined(HAVE_LONG_LONG)
# define bit_length(x) \
(unsigned int) \
(sizeof(x) <= SIZEOF_INT ? SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \
sizeof(x) <= SIZEOF_LONG ? SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x)) : \
sizeof(x) <= SIZEOF_LONG_LONG ? SIZEOF_LONG_LONG * CHAR_BIT - nlz_long_long((unsigned LONG_LONG)(x)) : \
SIZEOF_INT128_T * CHAR_BIT - nlz_int128((uint128_t)(x)))
#elif defined(HAVE_UINT128_T)
# define bit_length(x) \
(unsigned int) \
(sizeof(x) <= SIZEOF_INT ? SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \
sizeof(x) <= SIZEOF_LONG ? SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x)) : \
SIZEOF_INT128_T * CHAR_BIT - nlz_int128((uint128_t)(x)))
#elif defined(HAVE_LONG_LONG)
# define bit_length(x) \
(unsigned int) \
(sizeof(x) <= SIZEOF_INT ? SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \
sizeof(x) <= SIZEOF_LONG ? SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x)) : \
SIZEOF_LONG_LONG * CHAR_BIT - nlz_long_long((unsigned LONG_LONG)(x)))
#else
# define bit_length(x) \
(unsigned int) \
(sizeof(x) <= SIZEOF_INT ? SIZEOF_INT * CHAR_BIT - nlz_int((unsigned int)(x)) : \
SIZEOF_LONG * CHAR_BIT - nlz_long((unsigned long)(x)))
#endif
#ifndef BDIGIT
# if SIZEOF_INT*2 <= SIZEOF_LONG_LONG
# define BDIGIT unsigned int
# define SIZEOF_BDIGIT SIZEOF_INT
# define BDIGIT_DBL unsigned LONG_LONG
# define BDIGIT_DBL_SIGNED LONG_LONG
# define PRI_BDIGIT_PREFIX ""
# define PRI_BDIGIT_DBL_PREFIX PRI_LL_PREFIX
# elif SIZEOF_INT*2 <= SIZEOF_LONG
# define BDIGIT unsigned int
# define SIZEOF_BDIGIT SIZEOF_INT
# define BDIGIT_DBL unsigned long
# define BDIGIT_DBL_SIGNED long
# define PRI_BDIGIT_PREFIX ""
# define PRI_BDIGIT_DBL_PREFIX "l"
# elif SIZEOF_SHORT*2 <= SIZEOF_LONG
# define BDIGIT unsigned short
# define SIZEOF_BDIGIT SIZEOF_SHORT
# define BDIGIT_DBL unsigned long
# define BDIGIT_DBL_SIGNED long
# define PRI_BDIGIT_PREFIX "h"
# define PRI_BDIGIT_DBL_PREFIX "l"
# else
# define BDIGIT unsigned short
# define SIZEOF_BDIGIT (SIZEOF_LONG/2)
# define SIZEOF_ACTUAL_BDIGIT SIZEOF_LONG
# define BDIGIT_DBL unsigned long
# define BDIGIT_DBL_SIGNED long
# define PRI_BDIGIT_PREFIX "h"
# define PRI_BDIGIT_DBL_PREFIX "l"
# endif
#endif
#ifndef SIZEOF_ACTUAL_BDIGIT
# define SIZEOF_ACTUAL_BDIGIT SIZEOF_BDIGIT
#endif
#ifdef PRI_BDIGIT_PREFIX
# define PRIdBDIGIT PRI_BDIGIT_PREFIX"d"
# define PRIiBDIGIT PRI_BDIGIT_PREFIX"i"
# define PRIoBDIGIT PRI_BDIGIT_PREFIX"o"
# define PRIuBDIGIT PRI_BDIGIT_PREFIX"u"
# define PRIxBDIGIT PRI_BDIGIT_PREFIX"x"
# define PRIXBDIGIT PRI_BDIGIT_PREFIX"X"
#endif
#ifdef PRI_BDIGIT_DBL_PREFIX
# define PRIdBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"d"
# define PRIiBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"i"
# define PRIoBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"o"
# define PRIuBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"u"
# define PRIxBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"x"
# define PRIXBDIGIT_DBL PRI_BDIGIT_DBL_PREFIX"X"
#endif
#define BIGNUM_EMBED_LEN_NUMBITS 3
#ifndef BIGNUM_EMBED_LEN_MAX
# if (SIZEOF_VALUE*RVALUE_EMBED_LEN_MAX/SIZEOF_ACTUAL_BDIGIT) < (1 << BIGNUM_EMBED_LEN_NUMBITS)-1
# define BIGNUM_EMBED_LEN_MAX (SIZEOF_VALUE*RVALUE_EMBED_LEN_MAX/SIZEOF_ACTUAL_BDIGIT)
# else
# define BIGNUM_EMBED_LEN_MAX ((1 << BIGNUM_EMBED_LEN_NUMBITS)-1)
# endif
#endif
struct RBignum {
struct RBasic basic;
union {
struct {
size_t len;
BDIGIT *digits;
} heap;
BDIGIT ary[BIGNUM_EMBED_LEN_MAX];
} as;
};
#define BIGNUM_SIGN_BIT ((VALUE)FL_USER1)
/* sign: positive:1, negative:0 */
#define BIGNUM_SIGN(b) ((RBASIC(b)->flags & BIGNUM_SIGN_BIT) != 0)
#define BIGNUM_SET_SIGN(b,sign) \
((sign) ? (RBASIC(b)->flags |= BIGNUM_SIGN_BIT) \
: (RBASIC(b)->flags &= ~BIGNUM_SIGN_BIT))
#define BIGNUM_POSITIVE_P(b) BIGNUM_SIGN(b)
#define BIGNUM_NEGATIVE_P(b) (!BIGNUM_SIGN(b))
#define BIGNUM_NEGATE(b) (RBASIC(b)->flags ^= BIGNUM_SIGN_BIT)
#define BIGNUM_EMBED_FLAG ((VALUE)FL_USER2)
#define BIGNUM_EMBED_LEN_MASK \
(~(~(VALUE)0U << BIGNUM_EMBED_LEN_NUMBITS) << BIGNUM_EMBED_LEN_SHIFT)
#define BIGNUM_EMBED_LEN_SHIFT \
(FL_USHIFT+3) /* bit offset of BIGNUM_EMBED_LEN_MASK */
#define BIGNUM_LEN(b) \
((RBASIC(b)->flags & BIGNUM_EMBED_FLAG) ? \
(size_t)((RBASIC(b)->flags >> BIGNUM_EMBED_LEN_SHIFT) & \
(BIGNUM_EMBED_LEN_MASK >> BIGNUM_EMBED_LEN_SHIFT)) : \
RBIGNUM(b)->as.heap.len)
/* LSB:BIGNUM_DIGITS(b)[0], MSB:BIGNUM_DIGITS(b)[BIGNUM_LEN(b)-1] */
#define BIGNUM_DIGITS(b) \
((RBASIC(b)->flags & BIGNUM_EMBED_FLAG) ? \
RBIGNUM(b)->as.ary : \
RBIGNUM(b)->as.heap.digits)
#define BIGNUM_LENINT(b) rb_long2int(BIGNUM_LEN(b))
#define RBIGNUM(obj) (R_CAST(RBignum)(obj))
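/* Illustrative sketch (not part of the original header): because digits are
 * stored least significant first, the low bits of a Bignum's magnitude can
 * be read back like this. The helper name is hypothetical. */
#if 0
static BDIGIT_DBL
example_bignum_low_digits(VALUE big)
{
    const BDIGIT *ds = BIGNUM_DIGITS(big);
    size_t len = BIGNUM_LEN(big);
    BDIGIT_DBL lo = ds[0];
    if (len > 1) lo |= (BDIGIT_DBL)ds[1] << (SIZEOF_BDIGIT * CHAR_BIT);
    return lo;
}
#endif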
struct RRational {
struct RBasic basic;
VALUE num;
VALUE den;
};
#define RRATIONAL(obj) (R_CAST(RRational)(obj))
#define RRATIONAL_SET_NUM(rat, n) RB_OBJ_WRITE((rat), &((struct RRational *)(rat))->num,(n))
#define RRATIONAL_SET_DEN(rat, d) RB_OBJ_WRITE((rat), &((struct RRational *)(rat))->den,(d))
struct RFloat {
struct RBasic basic;
double float_value;
};
#define RFLOAT(obj) (R_CAST(RFloat)(obj))
struct RComplex {
struct RBasic basic;
VALUE real;
VALUE imag;
};
#define RCOMPLEX(obj) (R_CAST(RComplex)(obj))
/* shortcut macros for internal use only */
#define RCOMPLEX_SET_REAL(cmp, r) RB_OBJ_WRITE((cmp), &((struct RComplex *)(cmp))->real,(r))
#define RCOMPLEX_SET_IMAG(cmp, i) RB_OBJ_WRITE((cmp), &((struct RComplex *)(cmp))->imag,(i))
enum ruby_rhash_flags {
RHASH_PASS_AS_KEYWORDS = FL_USER1, /* FL 1 */
RHASH_PROC_DEFAULT = FL_USER2, /* FL 2 */
RHASH_ST_TABLE_FLAG = FL_USER3, /* FL 3 */
#define RHASH_AR_TABLE_MAX_SIZE SIZEOF_VALUE
RHASH_AR_TABLE_SIZE_MASK = (FL_USER4|FL_USER5|FL_USER6|FL_USER7), /* FL 4..7 */
RHASH_AR_TABLE_SIZE_SHIFT = (FL_USHIFT+4),
RHASH_AR_TABLE_BOUND_MASK = (FL_USER8|FL_USER9|FL_USER10|FL_USER11), /* FL 8..11 */
RHASH_AR_TABLE_BOUND_SHIFT = (FL_USHIFT+8),
// we cannot put it in "enum" because it can exceed the "int" range.
#define RHASH_LEV_MASK (FL_USER13 | FL_USER14 | FL_USER15 | /* FL 13..19 */ \
FL_USER16 | FL_USER17 | FL_USER18 | FL_USER19)
#if USE_TRANSIENT_HEAP
RHASH_TRANSIENT_FLAG = FL_USER12, /* FL 12 */
#endif
RHASH_LEV_SHIFT = (FL_USHIFT + 13),
RHASH_LEV_MAX = 127, /* 7 bits */
RHASH_ENUM_END
};
#define RHASH_AR_TABLE_SIZE_RAW(h) \
((unsigned int)((RBASIC(h)->flags & RHASH_AR_TABLE_SIZE_MASK) >> RHASH_AR_TABLE_SIZE_SHIFT))
int rb_hash_ar_table_p(VALUE hash);
struct ar_table_struct *rb_hash_ar_table(VALUE hash);
st_table *rb_hash_st_table(VALUE hash);
void rb_hash_st_table_set(VALUE hash, st_table *st);
#if 0 /* for debug */
#define RHASH_AR_TABLE_P(hash) rb_hash_ar_table_p(hash)
#define RHASH_AR_TABLE(h) rb_hash_ar_table(h)
#define RHASH_ST_TABLE(h) rb_hash_st_table(h)
#else
#define RHASH_AR_TABLE_P(hash) (!FL_TEST_RAW((hash), RHASH_ST_TABLE_FLAG))
#define RHASH_AR_TABLE(hash) (RHASH(hash)->as.ar)
#define RHASH_ST_TABLE(hash) (RHASH(hash)->as.st)
#endif
#define RHASH(obj) (R_CAST(RHash)(obj))
#define RHASH_ST_SIZE(h) (RHASH_ST_TABLE(h)->num_entries)
#define RHASH_ST_TABLE_P(h) (!RHASH_AR_TABLE_P(h))
#define RHASH_ST_CLEAR(h) (FL_UNSET_RAW(h, RHASH_ST_TABLE_FLAG), RHASH(h)->as.ar = NULL)
#define RHASH_AR_TABLE_SIZE_MASK (VALUE)RHASH_AR_TABLE_SIZE_MASK
#define RHASH_AR_TABLE_SIZE_SHIFT RHASH_AR_TABLE_SIZE_SHIFT
#define RHASH_AR_TABLE_BOUND_MASK (VALUE)RHASH_AR_TABLE_BOUND_MASK
#define RHASH_AR_TABLE_BOUND_SHIFT RHASH_AR_TABLE_BOUND_SHIFT
#if USE_TRANSIENT_HEAP
#define RHASH_TRANSIENT_P(hash) FL_TEST_RAW((hash), RHASH_TRANSIENT_FLAG)
#define RHASH_SET_TRANSIENT_FLAG(h) FL_SET_RAW(h, RHASH_TRANSIENT_FLAG)
#define RHASH_UNSET_TRANSIENT_FLAG(h) FL_UNSET_RAW(h, RHASH_TRANSIENT_FLAG)
#else
#define RHASH_TRANSIENT_P(hash) 0
#define RHASH_SET_TRANSIENT_FLAG(h) ((void)0)
#define RHASH_UNSET_TRANSIENT_FLAG(h) ((void)0)
#endif
#if SIZEOF_VALUE / RHASH_AR_TABLE_MAX_SIZE == 2
typedef uint16_t ar_hint_t;
#elif SIZEOF_VALUE / RHASH_AR_TABLE_MAX_SIZE == 1
typedef unsigned char ar_hint_t;
#else
#error unsupported
#endif
struct RHash {
struct RBasic basic;
union {
st_table *st;
struct ar_table_struct *ar; /* possibly 0 */
} as;
const VALUE ifnone;
union {
ar_hint_t ary[RHASH_AR_TABLE_MAX_SIZE];
VALUE word;
} ar_hint;
};
#ifdef RHASH_IFNONE
# undef RHASH_IFNONE
# undef RHASH_SIZE
# define RHASH_IFNONE(h) (RHASH(h)->ifnone)
# define RHASH_SIZE(h) (RHASH_AR_TABLE_P(h) ? RHASH_AR_TABLE_SIZE_RAW(h) : RHASH_ST_SIZE(h))
#endif /* ifdef RHASH_IFNONE */
struct RMoved {
VALUE flags;
VALUE destination;
VALUE next;
};
/* missing/setproctitle.c */
#ifndef HAVE_SETPROCTITLE
extern void ruby_init_setproctitle(int argc, char *argv[]);
#endif
#define RSTRUCT_EMBED_LEN_MAX RSTRUCT_EMBED_LEN_MAX
#define RSTRUCT_EMBED_LEN_MASK RSTRUCT_EMBED_LEN_MASK
#define RSTRUCT_EMBED_LEN_SHIFT RSTRUCT_EMBED_LEN_SHIFT
enum {
RSTRUCT_EMBED_LEN_MAX = RVALUE_EMBED_LEN_MAX,
RSTRUCT_EMBED_LEN_MASK = (RUBY_FL_USER2|RUBY_FL_USER1),
RSTRUCT_EMBED_LEN_SHIFT = (RUBY_FL_USHIFT+1),
RSTRUCT_TRANSIENT_FLAG = FL_USER3,
RSTRUCT_ENUM_END
};
#if USE_TRANSIENT_HEAP
#define RSTRUCT_TRANSIENT_P(st) FL_TEST_RAW((st), RSTRUCT_TRANSIENT_FLAG)
#define RSTRUCT_TRANSIENT_SET(st) FL_SET_RAW((st), RSTRUCT_TRANSIENT_FLAG)
#define RSTRUCT_TRANSIENT_UNSET(st) FL_UNSET_RAW((st), RSTRUCT_TRANSIENT_FLAG)
#else
#define RSTRUCT_TRANSIENT_P(st) 0
#define RSTRUCT_TRANSIENT_SET(st) ((void)0)
#define RSTRUCT_TRANSIENT_UNSET(st) ((void)0)
#endif
struct RStruct {
struct RBasic basic;
union {
struct {
long len;
const VALUE *ptr;
} heap;
const VALUE ary[RSTRUCT_EMBED_LEN_MAX];
} as;
};
#undef RSTRUCT_LEN
#undef RSTRUCT_PTR
#undef RSTRUCT_SET
#undef RSTRUCT_GET
#define RSTRUCT_EMBED_LEN(st) \
(long)((RBASIC(st)->flags >> RSTRUCT_EMBED_LEN_SHIFT) & \
(RSTRUCT_EMBED_LEN_MASK >> RSTRUCT_EMBED_LEN_SHIFT))
#define RSTRUCT_LEN(st) rb_struct_len(st)
#define RSTRUCT_LENINT(st) rb_long2int(RSTRUCT_LEN(st))
#define RSTRUCT_CONST_PTR(st) rb_struct_const_ptr(st)
#define RSTRUCT_PTR(st) ((VALUE *)RSTRUCT_CONST_PTR(RB_OBJ_WB_UNPROTECT_FOR(STRUCT, st)))
#define RSTRUCT_SET(st, idx, v) RB_OBJ_WRITE(st, &RSTRUCT_CONST_PTR(st)[idx], (v))
#define RSTRUCT_GET(st, idx) (RSTRUCT_CONST_PTR(st)[idx])
#define RSTRUCT(obj) (R_CAST(RStruct)(obj))
static inline long
rb_struct_len(VALUE st)
{
return (RBASIC(st)->flags & RSTRUCT_EMBED_LEN_MASK) ?
RSTRUCT_EMBED_LEN(st) : RSTRUCT(st)->as.heap.len;
}
static inline const VALUE *
rb_struct_const_ptr(VALUE st)
{
return FIX_CONST_VALUE_PTR((RBASIC(st)->flags & RSTRUCT_EMBED_LEN_MASK) ?
RSTRUCT(st)->as.ary : RSTRUCT(st)->as.heap.ptr);
}
static inline const VALUE *
rb_struct_const_heap_ptr(VALUE st)
{
/* TODO: check embed on debug mode */
return RSTRUCT(st)->as.heap.ptr;
}
/* class.c */
struct rb_deprecated_classext_struct {
char conflict[sizeof(VALUE) * 3];
};
struct rb_subclass_entry;
typedef struct rb_subclass_entry rb_subclass_entry_t;
struct rb_subclass_entry {
VALUE klass;
rb_subclass_entry_t *next;
};
#if defined(HAVE_LONG_LONG)
typedef unsigned LONG_LONG rb_serial_t;
#define SERIALT2NUM ULL2NUM
#define PRI_SERIALT_PREFIX PRI_LL_PREFIX
#elif defined(HAVE_UINT64_T)
typedef uint64_t rb_serial_t;
#define SERIALT2NUM SIZET2NUM
#define PRI_SERIALT_PREFIX PRI_64_PREFIX
#else
typedef unsigned long rb_serial_t;
#define SERIALT2NUM ULONG2NUM
#define PRI_SERIALT_PREFIX PRI_LONG_PREFIX
#endif
struct rb_classext_struct {
struct st_table *iv_index_tbl;
struct st_table *iv_tbl;
struct rb_id_table *const_tbl;
struct rb_id_table *callable_m_tbl;
rb_subclass_entry_t *subclasses;
rb_subclass_entry_t **parent_subclasses;
/**
* In the case that this is an `ICLASS`, `module_subclasses` points to the link
* in the module's `subclasses` list that indicates that the klass has been
* included. Hopefully that makes sense.
*/
rb_subclass_entry_t **module_subclasses;
rb_serial_t class_serial;
const VALUE origin_;
const VALUE refined_class;
rb_alloc_func_t allocator;
};
typedef struct rb_classext_struct rb_classext_t;
#undef RClass
struct RClass {
struct RBasic basic;
VALUE super;
rb_classext_t *ptr;
struct rb_id_table *m_tbl;
};
void rb_class_subclass_add(VALUE super, VALUE klass);
void rb_class_remove_from_super_subclasses(VALUE);
int rb_singleton_class_internal_p(VALUE sklass);
#define RCLASS_EXT(c) (RCLASS(c)->ptr)
#define RCLASS_IV_TBL(c) (RCLASS_EXT(c)->iv_tbl)
#define RCLASS_CONST_TBL(c) (RCLASS_EXT(c)->const_tbl)
#define RCLASS_M_TBL(c) (RCLASS(c)->m_tbl)
#define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
#define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
#define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
#define RCLASS_SERIAL(c) (RCLASS_EXT(c)->class_serial)
#define RCLASS_CLONED FL_USER6
#define RICLASS_IS_ORIGIN FL_USER5
static inline void
RCLASS_SET_ORIGIN(VALUE klass, VALUE origin)
{
RB_OBJ_WRITE(klass, &RCLASS_ORIGIN(klass), origin);
if (klass != origin) FL_SET(origin, RICLASS_IS_ORIGIN);
}
#undef RCLASS_SUPER
static inline VALUE
RCLASS_SUPER(VALUE klass)
{
return RCLASS(klass)->super;
}
static inline VALUE
RCLASS_SET_SUPER(VALUE klass, VALUE super)
{
if (super) {
rb_class_remove_from_super_subclasses(klass);
rb_class_subclass_add(super, klass);
}
RB_OBJ_WRITE(klass, &RCLASS(klass)->super, super);
return super;
}
/* IMEMO: Internal memo object */
#ifndef IMEMO_DEBUG
#define IMEMO_DEBUG 0
#endif
struct RIMemo {
VALUE flags;
VALUE v0;
VALUE v1;
VALUE v2;
VALUE v3;
};
enum imemo_type {
imemo_env = 0,
imemo_cref = 1, /*!< class reference */
imemo_svar = 2, /*!< special variable */
imemo_throw_data = 3,
imemo_ifunc = 4, /*!< iterator function */
imemo_memo = 5,
imemo_ment = 6,
imemo_iseq = 7,
imemo_tmpbuf = 8,
imemo_ast = 9,
imemo_parser_strterm = 10
};
#define IMEMO_MASK 0x0f
static inline enum imemo_type
imemo_type(VALUE imemo)
{
return (RBASIC(imemo)->flags >> FL_USHIFT) & IMEMO_MASK;
}
static inline int
imemo_type_p(VALUE imemo, enum imemo_type imemo_type)
{
if (LIKELY(!RB_SPECIAL_CONST_P(imemo))) {
/* fixed at compile time if imemo_type is given. */
const VALUE mask = (IMEMO_MASK << FL_USHIFT) | RUBY_T_MASK;
const VALUE expected_type = (imemo_type << FL_USHIFT) | T_IMEMO;
/* fixed at runtime. */
return expected_type == (RBASIC(imemo)->flags & mask);
}
else {
return 0;
}
}
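/* Usage sketch (illustrative, not from the original header): with a constant
 * subtype argument the check above folds into a single mask-and-compare.
 * The helper name is hypothetical. */
#if 0
static int
example_is_method_entry(VALUE v)
{
    return imemo_type_p(v, imemo_ment);
}
#endif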
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0);
/* FL_USER0 to FL_USER3 is for type */
#define IMEMO_FL_USHIFT (FL_USHIFT + 4)
#define IMEMO_FL_USER0 FL_USER4
#define IMEMO_FL_USER1 FL_USER5
#define IMEMO_FL_USER2 FL_USER6
#define IMEMO_FL_USER3 FL_USER7
#define IMEMO_FL_USER4 FL_USER8
/* CREF (Class REFerence) is defined in method.h */
/*! SVAR (Special VARiable) */
struct vm_svar {
VALUE flags;
const VALUE cref_or_me; /*!< class reference or rb_method_entry_t */
const VALUE lastline;
const VALUE backref;
const VALUE others;
};
#define THROW_DATA_CONSUMED IMEMO_FL_USER0
/*! THROW_DATA */
struct vm_throw_data {
VALUE flags;
VALUE reserved;
const VALUE throw_obj;
const struct rb_control_frame_struct *catch_frame;
int throw_state;
};
#define THROW_DATA_P(err) RB_TYPE_P((VALUE)(err), T_IMEMO)
/* IFUNC (Internal FUNCtion) */
struct vm_ifunc_argc {
#if SIZEOF_INT * 2 > SIZEOF_VALUE
signed int min: (SIZEOF_VALUE * CHAR_BIT) / 2;
signed int max: (SIZEOF_VALUE * CHAR_BIT) / 2;
#else
int min, max;
#endif
};
/*! IFUNC (Internal FUNCtion) */
struct vm_ifunc {
VALUE flags;
VALUE reserved;
rb_block_call_func_t func;
const void *data;
struct vm_ifunc_argc argc;
};
#define IFUNC_NEW(a, b, c) ((struct vm_ifunc *)rb_imemo_new(imemo_ifunc, (VALUE)(a), (VALUE)(b), (VALUE)(c), 0))
struct vm_ifunc *rb_vm_ifunc_new(rb_block_call_func_t func, const void *data, int min_argc, int max_argc);
static inline struct vm_ifunc *
rb_vm_ifunc_proc_new(rb_block_call_func_t func, const void *data)
{
return rb_vm_ifunc_new(func, data, 0, UNLIMITED_ARGUMENTS);
}
typedef struct rb_imemo_tmpbuf_struct {
VALUE flags;
VALUE reserved;
VALUE *ptr; /* malloc'ed buffer */
struct rb_imemo_tmpbuf_struct *next; /* next imemo */
size_t cnt; /* buffer size in VALUE */
} rb_imemo_tmpbuf_t;
#define rb_imemo_tmpbuf_auto_free_pointer() rb_imemo_new(imemo_tmpbuf, 0, 0, 0, 0)
VALUE rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt);
rb_imemo_tmpbuf_t *rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt);
#define RB_IMEMO_TMPBUF_PTR(v) \
((void *)(((const struct rb_imemo_tmpbuf_struct *)(v))->ptr))
static inline void *
rb_imemo_tmpbuf_set_ptr(VALUE v, void *ptr)
{
return ((rb_imemo_tmpbuf_t *)v)->ptr = ptr;
}
static inline VALUE
rb_imemo_tmpbuf_auto_free_pointer_new_from_an_RString(VALUE str)
{
const void *src;
VALUE imemo;
rb_imemo_tmpbuf_t *tmpbuf;
void *dst;
size_t len;
SafeStringValue(str);
/* create tmpbuf to keep the pointer before xmalloc */
imemo = rb_imemo_tmpbuf_auto_free_pointer();
tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
len = RSTRING_LEN(str);
src = RSTRING_PTR(str);
dst = ruby_xmalloc(len);
memcpy(dst, src, len);
tmpbuf->ptr = dst;
return imemo;
}
void rb_strterm_mark(VALUE obj);
/*! MEMO
*
* @see imemo_type
* */
struct MEMO {
VALUE flags;
VALUE reserved;
const VALUE v1;
const VALUE v2;
union {
long cnt;
long state;
const VALUE value;
void (*func)(void);
} u3;
};
#define MEMO_V1_SET(m, v) RB_OBJ_WRITE((m), &(m)->v1, (v))
#define MEMO_V2_SET(m, v) RB_OBJ_WRITE((m), &(m)->v2, (v))
#define MEMO_CAST(m) ((struct MEMO *)m)
#define MEMO_NEW(a, b, c) ((struct MEMO *)rb_imemo_new(imemo_memo, (VALUE)(a), (VALUE)(b), (VALUE)(c), 0))
#define roomof(x, y) (((x) + (y) - 1) / (y))
#define type_roomof(x, y) roomof(sizeof(x), sizeof(y))
#define MEMO_FOR(type, value) ((type *)RARRAY_PTR(value))
#define NEW_MEMO_FOR(type, value) \
((value) = rb_ary_tmp_new_fill(type_roomof(type, VALUE)), MEMO_FOR(type, value))
#define NEW_PARTIAL_MEMO_FOR(type, value, member) \
((value) = rb_ary_tmp_new_fill(type_roomof(type, VALUE)), \
rb_ary_set_len((value), offsetof(type, member) / sizeof(VALUE)), \
MEMO_FOR(type, value))
#define STRING_P(s) (RB_TYPE_P((s), T_STRING) && CLASS_OF(s) == rb_cString)
#ifdef RUBY_INTEGER_UNIFICATION
# define rb_cFixnum rb_cInteger
# define rb_cBignum rb_cInteger
#endif
enum {
cmp_opt_Fixnum,
cmp_opt_String,
cmp_opt_Float,
cmp_optimizable_count
};
struct cmp_opt_data {
unsigned int opt_methods;
unsigned int opt_inited;
};
#define NEW_CMP_OPT_MEMO(type, value) \
NEW_PARTIAL_MEMO_FOR(type, value, cmp_opt)
#define CMP_OPTIMIZABLE_BIT(type) (1U << TOKEN_PASTE(cmp_opt_,type))
#define CMP_OPTIMIZABLE(data, type) \
(((data).opt_inited & CMP_OPTIMIZABLE_BIT(type)) ? \
((data).opt_methods & CMP_OPTIMIZABLE_BIT(type)) : \
(((data).opt_inited |= CMP_OPTIMIZABLE_BIT(type)), \
rb_method_basic_definition_p(TOKEN_PASTE(rb_c,type), id_cmp) && \
((data).opt_methods |= CMP_OPTIMIZABLE_BIT(type))))
#define OPTIMIZED_CMP(a, b, data) \
((FIXNUM_P(a) && FIXNUM_P(b) && CMP_OPTIMIZABLE(data, Fixnum)) ? \
(((long)a > (long)b) ? 1 : ((long)a < (long)b) ? -1 : 0) : \
(STRING_P(a) && STRING_P(b) && CMP_OPTIMIZABLE(data, String)) ? \
rb_str_cmp(a, b) : \
(RB_FLOAT_TYPE_P(a) && RB_FLOAT_TYPE_P(b) && CMP_OPTIMIZABLE(data, Float)) ? \
rb_float_cmp(a, b) : \
rb_cmpint(rb_funcallv(a, id_cmp, 1, &b), a, b))
/* ment is in method.h */
/* global variable */
struct rb_global_entry {
struct rb_global_variable *var;
ID id;
};
struct rb_global_entry *rb_global_entry(ID);
VALUE rb_gvar_get(struct rb_global_entry *);
VALUE rb_gvar_set(struct rb_global_entry *, VALUE);
VALUE rb_gvar_defined(struct rb_global_entry *);
/* array.c */
#ifndef ARRAY_DEBUG
#define ARRAY_DEBUG (0+RUBY_DEBUG)
#endif
#ifdef ARRAY_DEBUG
#define RARRAY_PTR_IN_USE_FLAG FL_USER14
#define ARY_PTR_USING_P(ary) FL_TEST_RAW((ary), RARRAY_PTR_IN_USE_FLAG)
#else
/* disable debug function */
#undef RARRAY_PTR_USE_START_TRANSIENT
#undef RARRAY_PTR_USE_END_TRANSIENT
#define RARRAY_PTR_USE_START_TRANSIENT(a) ((VALUE *)RARRAY_CONST_PTR_TRANSIENT(a))
#define RARRAY_PTR_USE_END_TRANSIENT(a)
#define ARY_PTR_USING_P(ary) 0
#endif
#if USE_TRANSIENT_HEAP
#define RARY_TRANSIENT_SET(ary) FL_SET_RAW((ary), RARRAY_TRANSIENT_FLAG);
#define RARY_TRANSIENT_UNSET(ary) FL_UNSET_RAW((ary), RARRAY_TRANSIENT_FLAG);
#else
#undef RARRAY_TRANSIENT_P
#define RARRAY_TRANSIENT_P(a) 0
#define RARY_TRANSIENT_SET(ary) ((void)0)
#define RARY_TRANSIENT_UNSET(ary) ((void)0)
#endif
VALUE rb_ary_last(int, const VALUE *, VALUE);
void rb_ary_set_len(VALUE, long);
void rb_ary_delete_same(VALUE, VALUE);
VALUE rb_ary_tmp_new_fill(long capa);
VALUE rb_ary_at(VALUE, VALUE);
VALUE rb_ary_aref1(VALUE ary, VALUE i);
VALUE rb_ary_aref2(VALUE ary, VALUE b, VALUE e);
size_t rb_ary_memsize(VALUE);
VALUE rb_to_array_type(VALUE obj);
VALUE rb_check_to_array(VALUE ary);
VALUE rb_ary_tmp_new_from_values(VALUE, long, const VALUE *);
VALUE rb_ary_behead(VALUE, long);
#if defined(__GNUC__) && defined(HAVE_VA_ARGS_MACRO)
#define rb_ary_new_from_args(n, ...) \
__extension__ ({ \
const VALUE args_to_new_ary[] = {__VA_ARGS__}; \
if (__builtin_constant_p(n)) { \
STATIC_ASSERT(rb_ary_new_from_args, numberof(args_to_new_ary) == (n)); \
} \
rb_ary_new_from_values(numberof(args_to_new_ary), args_to_new_ary); \
})
#endif
static inline VALUE
rb_ary_entry_internal(VALUE ary, long offset)
{
long len = RARRAY_LEN(ary);
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
if (len == 0) return Qnil;
if (offset < 0) {
offset += len;
if (offset < 0) return Qnil;
}
else if (len <= offset) {
return Qnil;
}
return ptr[offset];
}
/* MRI debug support */
void rb_obj_info_dump(VALUE obj);
void rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func);
void ruby_debug_breakpoint(void);
// show obj data structure without any side-effect
#define rp(obj) rb_obj_info_dump_loc((VALUE)(obj), __FILE__, __LINE__, __func__)
// same as rp, but add message header
#define rp_m(msg, obj) do { \
fprintf(stderr, "%s", (msg)); \
rb_obj_info_dump((VALUE)obj); \
} while (0)
// `ruby_debug_breakpoint()` does nothing by itself,
// but a breakpoint is set on it in run.gdb, so `make gdb` can stop here.
#define bp() ruby_debug_breakpoint()
/* bignum.c */
extern const char ruby_digitmap[];
double rb_big_fdiv_double(VALUE x, VALUE y);
VALUE rb_big_uminus(VALUE x);
VALUE rb_big_hash(VALUE);
VALUE rb_big_odd_p(VALUE);
VALUE rb_big_even_p(VALUE);
size_t rb_big_size(VALUE);
VALUE rb_integer_float_cmp(VALUE x, VALUE y);
VALUE rb_integer_float_eq(VALUE x, VALUE y);
VALUE rb_cstr_parse_inum(const char *str, ssize_t len, char **endp, int base);
VALUE rb_str_convert_to_inum(VALUE str, int base, int badcheck, int raise_exception);
VALUE rb_big_comp(VALUE x);
VALUE rb_big_aref(VALUE x, VALUE y);
VALUE rb_big_abs(VALUE x);
VALUE rb_big_size_m(VALUE big);
VALUE rb_big_bit_length(VALUE big);
VALUE rb_big_remainder(VALUE x, VALUE y);
VALUE rb_big_gt(VALUE x, VALUE y);
VALUE rb_big_ge(VALUE x, VALUE y);
VALUE rb_big_lt(VALUE x, VALUE y);
VALUE rb_big_le(VALUE x, VALUE y);
VALUE rb_int_powm(int const argc, VALUE * const argv, VALUE const num);
/* class.c */
VALUE rb_class_boot(VALUE);
VALUE rb_class_inherited(VALUE, VALUE);
VALUE rb_make_metaclass(VALUE, VALUE);
VALUE rb_include_class_new(VALUE, VALUE);
void rb_class_foreach_subclass(VALUE klass, void (*f)(VALUE, VALUE), VALUE);
void rb_class_detach_subclasses(VALUE);
void rb_class_detach_module_subclasses(VALUE);
void rb_class_remove_from_module_subclasses(VALUE);
VALUE rb_obj_methods(int argc, const VALUE *argv, VALUE obj);
VALUE rb_obj_protected_methods(int argc, const VALUE *argv, VALUE obj);
VALUE rb_obj_private_methods(int argc, const VALUE *argv, VALUE obj);
VALUE rb_obj_public_methods(int argc, const VALUE *argv, VALUE obj);
VALUE rb_special_singleton_class(VALUE);
VALUE rb_singleton_class_clone_and_attach(VALUE obj, VALUE attach);
VALUE rb_singleton_class_get(VALUE obj);
void Init_class_hierarchy(void);
int rb_class_has_methods(VALUE c);
void rb_undef_methods_from(VALUE klass, VALUE super);
/* compar.c */
VALUE rb_invcmp(VALUE, VALUE);
/* compile.c */
struct rb_block;
struct rb_iseq_struct;
int rb_dvar_defined(ID, const struct rb_iseq_struct *);
int rb_local_defined(ID, const struct rb_iseq_struct *);
const char * rb_insns_name(int i);
VALUE rb_insns_name_array(void);
int rb_vm_insn_addr2insn(const void *);
/* complex.c */
VALUE rb_dbl_complex_new_polar_pi(double abs, double ang);
struct rb_thread_struct;
/* cont.c */
VALUE rb_obj_is_fiber(VALUE);
void rb_fiber_reset_root_local_storage(struct rb_thread_struct *);
void ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(VALUE), VALUE (*rollback_func)(VALUE));
/* debug.c */
PRINTF_ARGS(void ruby_debug_printf(const char*, ...), 1, 2);
/* dir.c */
VALUE rb_dir_getwd_ospath(void);
/* dmyext.c */
void Init_enc(void);
void Init_ext(void);
/* encoding.c */
ID rb_id_encoding(void);
#ifdef RUBY_ENCODING_H
rb_encoding *rb_enc_get_from_index(int index);
rb_encoding *rb_enc_check_str(VALUE str1, VALUE str2);
#endif
int rb_encdb_replicate(const char *alias, const char *orig);
int rb_encdb_alias(const char *alias, const char *orig);
int rb_encdb_dummy(const char *name);
void rb_encdb_declare(const char *name);
void rb_enc_set_base(const char *name, const char *orig);
int rb_enc_set_dummy(int index);
void rb_encdb_set_unicode(int index);
PUREFUNC(int rb_data_is_encoding(VALUE obj));
/* enum.c */
extern VALUE rb_cArithSeq;
VALUE rb_f_send(int argc, VALUE *argv, VALUE recv);
VALUE rb_nmin_run(VALUE obj, VALUE num, int by, int rev, int ary);
/* error.c */
extern VALUE rb_eEAGAIN;
extern VALUE rb_eEWOULDBLOCK;
extern VALUE rb_eEINPROGRESS;
void rb_report_bug_valist(VALUE file, int line, const char *fmt, va_list args);
VALUE rb_check_backtrace(VALUE);
NORETURN(void rb_async_bug_errno(const char *,int));
const char *rb_builtin_type_name(int t);
const char *rb_builtin_class_name(VALUE x);
PRINTF_ARGS(void rb_sys_warn(const char *fmt, ...), 1, 2);
PRINTF_ARGS(void rb_syserr_warn(int err, const char *fmt, ...), 2, 3);
PRINTF_ARGS(void rb_sys_warning(const char *fmt, ...), 1, 2);
PRINTF_ARGS(void rb_syserr_warning(int err, const char *fmt, ...), 2, 3);
#ifdef RUBY_ENCODING_H
VALUE rb_syntax_error_append(VALUE, VALUE, int, int, rb_encoding*, const char*, va_list);
PRINTF_ARGS(void rb_enc_warn(rb_encoding *enc, const char *fmt, ...), 2, 3);
PRINTF_ARGS(void rb_sys_enc_warn(rb_encoding *enc, const char *fmt, ...), 2, 3);
PRINTF_ARGS(void rb_syserr_enc_warn(int err, rb_encoding *enc, const char *fmt, ...), 3, 4);
PRINTF_ARGS(void rb_enc_warning(rb_encoding *enc, const char *fmt, ...), 2, 3);
PRINTF_ARGS(void rb_sys_enc_warning(rb_encoding *enc, const char *fmt, ...), 2, 3);
PRINTF_ARGS(void rb_syserr_enc_warning(int err, rb_encoding *enc, const char *fmt, ...), 3, 4);
#endif
#define rb_raise_cstr(etype, mesg) \
rb_exc_raise(rb_exc_new_str(etype, rb_str_new_cstr(mesg)))
#define rb_raise_static(etype, mesg) \
rb_exc_raise(rb_exc_new_str(etype, rb_str_new_static(mesg, rb_strlen_lit(mesg))))
VALUE rb_name_err_new(VALUE mesg, VALUE recv, VALUE method);
#define rb_name_err_raise_str(mesg, recv, name) \
rb_exc_raise(rb_name_err_new(mesg, recv, name))
#define rb_name_err_raise(mesg, recv, name) \
rb_name_err_raise_str(rb_fstring_cstr(mesg), (recv), (name))
VALUE rb_nomethod_err_new(VALUE mesg, VALUE recv, VALUE method, VALUE args, int priv);
VALUE rb_key_err_new(VALUE mesg, VALUE recv, VALUE name);
#define rb_key_err_raise(mesg, recv, name) \
rb_exc_raise(rb_key_err_new(mesg, recv, name))
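/* Usage sketch for the raise helpers above (illustration only; the
 * message and format strings here are made up):
 *
 *     rb_raise_static(rb_eArgError, "negative array size");
 *     rb_name_err_raise("undefined local variable or method `%1$s'",
 *                       recv, name);
 *
 * rb_raise_static() reuses the literal's compile-time length instead of
 * calling strlen() at runtime, and rb_name_err_raise() interns its C-string
 * message via rb_fstring_cstr() before building the NameError. */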
NORETURN(void ruby_deprecated_internal_feature(const char *));
#define DEPRECATED_INTERNAL_FEATURE(func) \
(ruby_deprecated_internal_feature(func), UNREACHABLE)
VALUE rb_warning_warn(VALUE mod, VALUE str);
PRINTF_ARGS(VALUE rb_warning_string(const char *fmt, ...), 1, 2);
NORETURN(void rb_vraise(VALUE, const char *, va_list));
/* eval.c */
VALUE rb_refinement_module_get_refined_class(VALUE module);
extern ID ruby_static_id_signo, ruby_static_id_status;
void rb_class_modify_check(VALUE);
#define id_signo ruby_static_id_signo
#define id_status ruby_static_id_status
NORETURN(VALUE rb_f_raise(int argc, VALUE *argv));
/* eval_error.c */
VALUE rb_get_backtrace(VALUE info);
/* eval_jump.c */
void rb_call_end_proc(VALUE data);
void rb_mark_end_proc(void);
/* file.c */
extern const char ruby_null_device[];
VALUE rb_home_dir_of(VALUE user, VALUE result);
VALUE rb_default_home_dir(VALUE result);
VALUE rb_realpath_internal(VALUE basedir, VALUE path, int strict);
VALUE rb_check_realpath(VALUE basedir, VALUE path);
void rb_file_const(const char*, VALUE);
int rb_file_load_ok(const char *);
VALUE rb_file_expand_path_fast(VALUE, VALUE);
VALUE rb_file_expand_path_internal(VALUE, VALUE, int, int, VALUE);
VALUE rb_get_path_check_to_string(VALUE, int);
VALUE rb_get_path_check_convert(VALUE, VALUE, int);
VALUE rb_get_path_check(VALUE, int);
void Init_File(void);
int ruby_is_fd_loadable(int fd);
#ifdef RUBY_FUNCTION_NAME_STRING
# if defined __GNUC__ && __GNUC__ >= 4
# pragma GCC visibility push(default)
# endif
NORETURN(void rb_sys_fail_path_in(const char *func_name, VALUE path));
NORETURN(void rb_syserr_fail_path_in(const char *func_name, int err, VALUE path));
# if defined __GNUC__ && __GNUC__ >= 4
# pragma GCC visibility pop
# endif
# define rb_sys_fail_path(path) rb_sys_fail_path_in(RUBY_FUNCTION_NAME_STRING, path)
# define rb_syserr_fail_path(err, path) rb_syserr_fail_path_in(RUBY_FUNCTION_NAME_STRING, (err), (path))
#else
# define rb_sys_fail_path(path) rb_sys_fail_str(path)
# define rb_syserr_fail_path(err, path) rb_syserr_fail_str((err), (path))
#endif
/* gc.c */
extern VALUE *ruby_initial_gc_stress_ptr;
extern int ruby_disable_gc;
void Init_heap(void);
void *ruby_mimmalloc(size_t size) RUBY_ATTR_MALLOC;
void ruby_mimfree(void *ptr);
void rb_objspace_set_event_hook(const rb_event_flag_t event);
#if USE_RGENGC
void rb_gc_writebarrier_remember(VALUE obj);
#else
#define rb_gc_writebarrier_remember(obj) 0
#endif
void ruby_gc_set_params(int safe_level);
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj);
#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)
#define ruby_sized_xrealloc(ptr, new_size, old_size) ruby_xrealloc(ptr, new_size)
#define ruby_sized_xrealloc2(ptr, new_count, element_size, old_count) ruby_xrealloc2(ptr, new_count, element_size)
#define ruby_sized_xfree(ptr, size) ruby_xfree(ptr)
#define SIZED_REALLOC_N(var,type,n,old_n) REALLOC_N(var, type, n)
#else
RUBY_SYMBOL_EXPORT_BEGIN
void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_ALLOC_SIZE((2));
void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_ALLOC_SIZE((2, 3));
void ruby_sized_xfree(void *x, size_t size);
RUBY_SYMBOL_EXPORT_END
#define SIZED_REALLOC_N(var,type,n,old_n) ((var)=(type*)ruby_sized_xrealloc2((void*)(var), (n), sizeof(type), (old_n)))
#endif
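/* Sketch of the intent (not a definitive contract): the sized variants let
 * callers pass the previous allocation size, which matters for malloc
 * accounting only on platforms without malloc_usable_size()-style
 * introspection; where it exists, the macros above simply drop the old
 * size.  Hypothetical call sites would look like:
 *
 *     buf = ruby_sized_xrealloc(buf, new_capa, old_capa);
 *     SIZED_REALLOC_N(ptr, VALUE, new_len, old_len);
 *     ruby_sized_xfree(buf, new_capa);
 */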
/* optimized version of NEWOBJ() */
#undef NEWOBJF_OF
#undef RB_NEWOBJ_OF
#define RB_NEWOBJ_OF(obj,type,klass,flags) \
type *(obj) = (type*)(((flags) & FL_WB_PROTECTED) ? \
rb_wb_protected_newobj_of(klass, (flags) & ~FL_WB_PROTECTED) : \
rb_wb_unprotected_newobj_of(klass, flags))
#define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags)
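/* Illustration (a sketch, not a usage contract): RB_NEWOBJ_OF dispatches to
 * the write-barrier-protected allocator when FL_WB_PROTECTED is set in
 * `flags`, and to the unprotected one otherwise.  A core-side allocation
 * would look roughly like:
 *
 *     NEWOBJ_OF(str, struct RString, klass, T_STRING | FL_WB_PROTECTED);
 *     // `str` is now a struct RString * with klass and flags filled in
 */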
#ifdef __has_attribute
#if __has_attribute(alloc_align)
__attribute__((__alloc_align__(1)))
#endif
#endif
void *rb_aligned_malloc(size_t, size_t) RUBY_ATTR_MALLOC RUBY_ATTR_ALLOC_SIZE((2));
void rb_aligned_free(void *);
size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
void *rb_xmalloc_mul_add(size_t, size_t, size_t) RUBY_ATTR_MALLOC;
void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t) RUBY_ATTR_MALLOC;
void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t) RUBY_ATTR_MALLOC;
/* hash.c */
#if RHASH_CONVERT_TABLE_DEBUG
struct st_table *rb_hash_tbl_raw(VALUE hash, const char *file, int line);
#define RHASH_TBL_RAW(h) rb_hash_tbl_raw(h, __FILE__, __LINE__)
#else
struct st_table *rb_hash_tbl_raw(VALUE hash);
#define RHASH_TBL_RAW(h) rb_hash_tbl_raw(h)
#endif
VALUE rb_hash_new_with_size(st_index_t size);
VALUE rb_hash_has_key(VALUE hash, VALUE key);
VALUE rb_hash_default_value(VALUE hash, VALUE key);
VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc);
long rb_objid_hash(st_index_t index);
long rb_dbl_long_hash(double d);
st_table *rb_init_identtable(void);
st_table *rb_init_identtable_with_size(st_index_t size);
VALUE rb_hash_compare_by_id_p(VALUE hash);
VALUE rb_to_hash_type(VALUE obj);
VALUE rb_hash_key_str(VALUE);
VALUE rb_hash_keys(VALUE hash);
VALUE rb_hash_values(VALUE hash);
VALUE rb_hash_rehash(VALUE hash);
VALUE rb_hash_resurrect(VALUE hash);
int rb_hash_add_new_element(VALUE hash, VALUE key, VALUE val);
VALUE rb_hash_set_pair(VALUE hash, VALUE pair);
int rb_hash_stlike_lookup(VALUE hash, st_data_t key, st_data_t *pval);
int rb_hash_stlike_delete(VALUE hash, st_data_t *pkey, st_data_t *pval);
RUBY_SYMBOL_EXPORT_BEGIN
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg);
RUBY_SYMBOL_EXPORT_END
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg);
int rb_hash_stlike_update(VALUE hash, st_data_t key, st_update_callback_func func, st_data_t arg);
/* inits.c */
void rb_call_inits(void);
/* io.c */
const char *ruby_get_inplace_mode(void);
void ruby_set_inplace_mode(const char *);
ssize_t rb_io_bufread(VALUE io, void *buf, size_t size);
void rb_stdio_set_default_encoding(void);
VALUE rb_io_flush_raw(VALUE, int);
#ifdef RUBY_IO_H
size_t rb_io_memsize(const rb_io_t *);
#endif
int rb_stderr_tty_p(void);
void rb_io_fptr_finalize_internal(void *ptr);
#define rb_io_fptr_finalize rb_io_fptr_finalize_internal
/* load.c */
VALUE rb_get_load_path(void);
VALUE rb_get_expanded_load_path(void);
int rb_require_internal(VALUE fname, int safe);
NORETURN(void rb_load_fail(VALUE, const char*));
/* loadpath.c */
extern const char ruby_exec_prefix[];
extern const char ruby_initial_load_paths[];
/* localeinit.c */
int Init_enc_set_filesystem_encoding(void);
/* math.c */
VALUE rb_math_atan2(VALUE, VALUE);
VALUE rb_math_cos(VALUE);
VALUE rb_math_cosh(VALUE);
VALUE rb_math_exp(VALUE);
VALUE rb_math_hypot(VALUE, VALUE);
VALUE rb_math_log(int argc, const VALUE *argv);
VALUE rb_math_sin(VALUE);
VALUE rb_math_sinh(VALUE);
VALUE rb_math_sqrt(VALUE);
/* mjit.c */
#if USE_MJIT
extern bool mjit_enabled;
VALUE mjit_pause(bool wait_p);
VALUE mjit_resume(void);
void mjit_finish(bool close_handle_p);
#else
#define mjit_enabled 0
static inline VALUE mjit_pause(bool wait_p){ return Qnil; } // unreachable
static inline VALUE mjit_resume(void){ return Qnil; } // unreachable
static inline void mjit_finish(bool close_handle_p){}
#endif
/* newline.c */
void Init_newline(void);
/* numeric.c */
#define FIXNUM_POSITIVE_P(num) ((SIGNED_VALUE)(num) > (SIGNED_VALUE)INT2FIX(0))
#define FIXNUM_NEGATIVE_P(num) ((SIGNED_VALUE)(num) < 0)
#define FIXNUM_ZERO_P(num) ((num) == INT2FIX(0))
#define INT_NEGATIVE_P(x) (FIXNUM_P(x) ? FIXNUM_NEGATIVE_P(x) : BIGNUM_NEGATIVE_P(x))
#define FLOAT_ZERO_P(x) (RFLOAT_VALUE(x) == 0.0)
#ifndef ROUND_DEFAULT
# define ROUND_DEFAULT RUBY_NUM_ROUND_HALF_UP
#endif
enum ruby_num_rounding_mode {
RUBY_NUM_ROUND_HALF_UP,
RUBY_NUM_ROUND_HALF_EVEN,
RUBY_NUM_ROUND_HALF_DOWN,
RUBY_NUM_ROUND_DEFAULT = ROUND_DEFAULT
};
#define ROUND_TO(mode, even, up, down) \
((mode) == RUBY_NUM_ROUND_HALF_EVEN ? even : \
(mode) == RUBY_NUM_ROUND_HALF_UP ? up : down)
#define ROUND_FUNC(mode, name) \
ROUND_TO(mode, name##_half_even, name##_half_up, name##_half_down)
#define ROUND_CALL(mode, name, args) \
ROUND_TO(mode, name##_half_even args, \
name##_half_up args, name##_half_down args)
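/* Illustration (sketch only): given a function family following the
 * name##_half_{even,up,down} pattern these macros expect, e.g.
 * round_half_even(), round_half_up() and round_half_down(),
 *
 *     ROUND_CALL(mode, round, (x, s))
 *
 * evaluates to round_half_even(x, s), round_half_up(x, s) or
 * round_half_down(x, s) depending on `mode`, while ROUND_FUNC(mode, round)
 * selects the corresponding function without invoking it. */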
int rb_num_to_uint(VALUE val, unsigned int *ret);
VALUE ruby_num_interval_step_size(VALUE from, VALUE to, VALUE step, int excl);
double ruby_float_step_size(double beg, double end, double unit, int excl);
int ruby_float_step(VALUE from, VALUE to, VALUE step, int excl, int allow_endless);
double ruby_float_mod(double x, double y);
int rb_num_negative_p(VALUE);
VALUE rb_int_succ(VALUE num);
VALUE rb_int_pred(VALUE num);
VALUE rb_int_uminus(VALUE num);
VALUE rb_float_uminus(VALUE num);
VALUE rb_int_plus(VALUE x, VALUE y);
VALUE rb_float_plus(VALUE x, VALUE y);
VALUE rb_int_minus(VALUE x, VALUE y);
VALUE rb_int_mul(VALUE x, VALUE y);
VALUE rb_float_mul(VALUE x, VALUE y);
VALUE rb_float_div(VALUE x, VALUE y);
VALUE rb_int_idiv(VALUE x, VALUE y);
VALUE rb_int_modulo(VALUE x, VALUE y);
VALUE rb_int_round(VALUE num, int ndigits, enum ruby_num_rounding_mode mode);
VALUE rb_int2str(VALUE num, int base);
VALUE rb_dbl_hash(double d);
VALUE rb_fix_plus(VALUE x, VALUE y);
VALUE rb_fix_aref(VALUE fix, VALUE idx);
VALUE rb_int_gt(VALUE x, VALUE y);
int rb_float_cmp(VALUE x, VALUE y);
VALUE rb_float_gt(VALUE x, VALUE y);
VALUE rb_int_ge(VALUE x, VALUE y);
enum ruby_num_rounding_mode rb_num_get_rounding_option(VALUE opts);
double rb_int_fdiv_double(VALUE x, VALUE y);
VALUE rb_int_pow(VALUE x, VALUE y);
VALUE rb_float_pow(VALUE x, VALUE y);
VALUE rb_int_cmp(VALUE x, VALUE y);
VALUE rb_int_equal(VALUE x, VALUE y);
VALUE rb_int_divmod(VALUE x, VALUE y);
VALUE rb_int_and(VALUE x, VALUE y);
VALUE rb_int_lshift(VALUE x, VALUE y);
VALUE rb_int_div(VALUE x, VALUE y);
VALUE rb_int_abs(VALUE num);
VALUE rb_int_odd_p(VALUE num);
int rb_int_positive_p(VALUE num);
int rb_int_negative_p(VALUE num);
VALUE rb_num_pow(VALUE x, VALUE y);
VALUE rb_float_ceil(VALUE num, int ndigits);
static inline VALUE
rb_num_compare_with_zero(VALUE num, ID mid)
{
VALUE zero = INT2FIX(0);
VALUE r = rb_check_funcall(num, mid, 1, &zero);
if (r == Qundef) {
rb_cmperr(num, zero);
}
return r;
}
static inline int
rb_num_positive_int_p(VALUE num)
{
const ID mid = '>';
if (FIXNUM_P(num)) {
if (rb_method_basic_definition_p(rb_cInteger, mid))
return FIXNUM_POSITIVE_P(num);
}
else if (RB_TYPE_P(num, T_BIGNUM)) {
if (rb_method_basic_definition_p(rb_cInteger, mid))
return BIGNUM_POSITIVE_P(num);
}
return RTEST(rb_num_compare_with_zero(num, mid));
}
static inline int
rb_num_negative_int_p(VALUE num)
{
const ID mid = '<';
if (FIXNUM_P(num)) {
if (rb_method_basic_definition_p(rb_cInteger, mid))
return FIXNUM_NEGATIVE_P(num);
}
else if (RB_TYPE_P(num, T_BIGNUM)) {
if (rb_method_basic_definition_p(rb_cInteger, mid))
return BIGNUM_NEGATIVE_P(num);
}
return RTEST(rb_num_compare_with_zero(num, mid));
}
VALUE rb_float_abs(VALUE flt);
VALUE rb_float_equal(VALUE x, VALUE y);
VALUE rb_float_eql(VALUE x, VALUE y);
VALUE rb_flo_div_flo(VALUE x, VALUE y);
#if USE_FLONUM
#define RUBY_BIT_ROTL(v, n) (((v) << (n)) | ((v) >> ((sizeof(v) * 8) - (n))))
#define RUBY_BIT_ROTR(v, n) (((v) >> (n)) | ((v) << ((sizeof(v) * 8) - (n))))
#endif
static inline double
rb_float_flonum_value(VALUE v)
{
#if USE_FLONUM
if (v != (VALUE)0x8000000000000002) { /* LIKELY */
union {
double d;
VALUE v;
} t;
VALUE b63 = (v >> 63);
/* e: xx1... -> 011... */
/* xx0... -> 100... */
/* ^b63 */
t.v = RUBY_BIT_ROTR((2 - b63) | (v & ~(VALUE)0x03), 3);
return t.d;
}
#endif
return 0.0;
}
static inline double
rb_float_noflonum_value(VALUE v)
{
return ((struct RFloat *)v)->float_value;
}
static inline double
rb_float_value_inline(VALUE v)
{
if (FLONUM_P(v)) {
return rb_float_flonum_value(v);
}
return rb_float_noflonum_value(v);
}
static inline VALUE
rb_float_new_inline(double d)
{
#if USE_FLONUM
union {
double d;
VALUE v;
} t;
int bits;
t.d = d;
bits = (int)((VALUE)(t.v >> 60) & 0x7);
/* bits contains 3 bits of b62..b60. */
/* bits - 3 = */
/* b011 -> b000 */
/* b100 -> b001 */
if (t.v != 0x3000000000000000 /* 1.72723e-77 */ &&
!((bits-3) & ~0x01)) {
return (RUBY_BIT_ROTL(t.v, 3) & ~(VALUE)0x01) | 0x02;
}
else if (t.v == (VALUE)0) {
/* +0.0 */
return 0x8000000000000002;
}
/* out of range */
#endif
return rb_float_new_in_heap(d);
}
#define rb_float_value(v) rb_float_value_inline(v)
#define rb_float_new(d) rb_float_new_inline(d)
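/* Sketch of the flonum round-trip (illustration only, under the USE_FLONUM
 * branch above): doubles whose exponent fits the flonum range are stored
 * immediately in the VALUE by rotating the IEEE 754 bits left by 3 and
 * tagging the low bits; rb_float_value_inline() undoes the rotation, so the
 * conversion is exact:
 *
 *     double d = 1.0;
 *     VALUE v = rb_float_new_inline(d);       // flonum, no heap allocation
 *     assert(FLONUM_P(v));
 *     assert(rb_float_value_inline(v) == d);  // lossless round-trip
 *
 * +0.0 maps to the reserved pattern 0x8000000000000002, and doubles whose
 * exponent is out of range (including -0.0) fall back to
 * rb_float_new_in_heap(). */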
/* object.c */
void rb_obj_copy_ivar(VALUE dest, VALUE obj);
CONSTFUNC(VALUE rb_obj_equal(VALUE obj1, VALUE obj2));
CONSTFUNC(VALUE rb_obj_not(VALUE obj));
VALUE rb_class_search_ancestor(VALUE klass, VALUE super);
NORETURN(void rb_undefined_alloc(VALUE klass));
double rb_num_to_dbl(VALUE val);
VALUE rb_obj_dig(int argc, VALUE *argv, VALUE self, VALUE notfound);
VALUE rb_immutable_obj_clone(int, VALUE *, VALUE);
VALUE rb_obj_not_equal(VALUE obj1, VALUE obj2);
VALUE rb_convert_type_with_id(VALUE,int,const char*,ID);
VALUE rb_check_convert_type_with_id(VALUE,int,const char*,ID);
int rb_bool_expected(VALUE, const char *);
struct RBasicRaw {
VALUE flags;
VALUE klass;
};
#define RBASIC_CLEAR_CLASS(obj) memset(&(((struct RBasicRaw *)((VALUE)(obj)))->klass), 0, sizeof(VALUE))
#define RBASIC_SET_CLASS_RAW(obj, cls) memcpy(&((struct RBasicRaw *)((VALUE)(obj)))->klass, &(cls), sizeof(VALUE))
#define RBASIC_SET_CLASS(obj, cls) do { \
VALUE _obj_ = (obj); \
RB_OBJ_WRITE(_obj_, &((struct RBasicRaw *)(_obj_))->klass, cls); \
} while (0)
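/* Sketch (illustration only): RBASIC_CLEAR_CLASS turns an object into a
 * hidden/internal one, and RBASIC_SET_CLASS re-attaches a class through the
 * write barrier, roughly mirroring rb_obj_hide()/rb_obj_reveal():
 *
 *     VALUE klass = RBASIC_CLASS(obj);
 *     RBASIC_CLEAR_CLASS(obj);        // hide obj from Ruby code
 *     ...
 *     RBASIC_SET_CLASS(obj, klass);   // reveal it again
 *
 * RBASIC_SET_CLASS_RAW performs the same assignment without the write
 * barrier. */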
/* parse.y */
#ifndef USE_SYMBOL_GC
#define USE_SYMBOL_GC 1
#endif
VALUE rb_parser_get_yydebug(VALUE);
VALUE rb_parser_set_yydebug(VALUE, VALUE);
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_parser_set_context(VALUE, const struct rb_iseq_struct *, int);
RUBY_SYMBOL_EXPORT_END
void *rb_parser_load_file(VALUE parser, VALUE name);
int rb_is_const_name(VALUE name);
int rb_is_class_name(VALUE name);
int rb_is_global_name(VALUE name);
int rb_is_instance_name(VALUE name);
int rb_is_attrset_name(VALUE name);
int rb_is_local_name(VALUE name);
int rb_is_method_name(VALUE name);
int rb_is_junk_name(VALUE name);
PUREFUNC(int rb_is_const_sym(VALUE sym));
PUREFUNC(int rb_is_class_sym(VALUE sym));
PUREFUNC(int rb_is_global_sym(VALUE sym));
PUREFUNC(int rb_is_instance_sym(VALUE sym));
PUREFUNC(int rb_is_attrset_sym(VALUE sym));
PUREFUNC(int rb_is_local_sym(VALUE sym));
PUREFUNC(int rb_is_method_sym(VALUE sym));
PUREFUNC(int rb_is_junk_sym(VALUE sym));
ID rb_make_internal_id(void);
void rb_gc_free_dsymbol(VALUE);
ID rb_id_attrget(ID id);
/* proc.c */
VALUE rb_proc_location(VALUE self);
st_index_t rb_hash_proc(st_index_t hash, VALUE proc);
int rb_block_arity(void);
int rb_block_min_max_arity(int *max);
VALUE rb_func_proc_new(rb_block_call_func_t func, VALUE val);
VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc);
VALUE rb_block_to_s(VALUE self, const struct rb_block *block, const char *additional_info);
/* process.c */
#define RB_MAX_GROUPS (65536)
struct waitpid_state;
struct rb_execarg {
union {
struct {
VALUE shell_script;
} sh;
struct {
VALUE command_name;
VALUE command_abspath; /* full path string or nil */
VALUE argv_str;
VALUE argv_buf;
} cmd;
} invoke;
VALUE redirect_fds;
VALUE envp_str;
VALUE envp_buf;
VALUE dup2_tmpbuf;
unsigned use_shell : 1;
unsigned pgroup_given : 1;
unsigned umask_given : 1;
unsigned unsetenv_others_given : 1;
unsigned unsetenv_others_do : 1;
unsigned close_others_given : 1;
unsigned close_others_do : 1;
unsigned chdir_given : 1;
unsigned new_pgroup_given : 1;
unsigned new_pgroup_flag : 1;
unsigned uid_given : 1;
unsigned gid_given : 1;
unsigned exception : 1;
unsigned exception_given : 1;
struct waitpid_state *waitpid_state; /* for async process management */
rb_pid_t pgroup_pgid; /* asis(-1), new pgroup(0), specified pgroup (0<V). */
VALUE rlimit_limits; /* Qfalse or [[rtype, softlim, hardlim], ...] */
mode_t umask_mask;
rb_uid_t uid;
rb_gid_t gid;
int close_others_maxhint;
VALUE fd_dup2;
VALUE fd_close;
VALUE fd_open;
VALUE fd_dup2_child;
VALUE env_modification; /* Qfalse or [[k1,v1], ...] */
VALUE path_env;
VALUE chdir_dir;
};
/* argv_str contains two extra elements.
 * The first one is for /bin/sh used by exec_with_sh.
 * The last one is the terminating NULL used by execve.
 * See rb_exec_fillarg() in process.c. */
#define ARGVSTR2ARGV(argv_str) ((char **)RB_IMEMO_TMPBUF_PTR(argv_str) + 1)
static inline size_t
ARGVSTR2ARGC(VALUE argv_str)
{
size_t i = 0;
char *const *p = ARGVSTR2ARGV(argv_str);
while (p[i++])
;
return i - 1;
}
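/* Usage sketch (illustration only): once rb_exec_fillarg() has built
 * argv_str, the exec/spawn code can hand it to an execve-style interface:
 *
 *     char **argv = ARGVSTR2ARGV(argv_str);
 *     size_t argc = ARGVSTR2ARGC(argv_str);  // counts entries up to the NULL
 *     // argv[argc] == NULL; argv[-1] is the slot reserved for /bin/sh
 */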
rb_pid_t rb_fork_ruby(int *status);
void rb_last_status_clear(void);
/* range.c */
#define RANGE_BEG(r) (RSTRUCT(r)->as.ary[0])
#define RANGE_END(r) (RSTRUCT(r)->as.ary[1])
#define RANGE_EXCL(r) (RSTRUCT(r)->as.ary[2])
/* rational.c */
VALUE rb_rational_canonicalize(VALUE x);
VALUE rb_rational_uminus(VALUE self);
VALUE rb_rational_plus(VALUE self, VALUE other);
VALUE rb_rational_mul(VALUE self, VALUE other);
VALUE rb_lcm(VALUE x, VALUE y);
VALUE rb_rational_reciprocal(VALUE x);
VALUE rb_cstr_to_rat(const char *, int);
VALUE rb_rational_abs(VALUE self);
VALUE rb_rational_cmp(VALUE self, VALUE other);
VALUE rb_rational_pow(VALUE self, VALUE other);
VALUE rb_numeric_quo(VALUE x, VALUE y);
VALUE rb_float_numerator(VALUE x);
VALUE rb_float_denominator(VALUE x);
/* re.c */
VALUE rb_reg_compile(VALUE str, int options, const char *sourcefile, int sourceline);
VALUE rb_reg_check_preprocess(VALUE);
long rb_reg_search0(VALUE, VALUE, long, int, int);
VALUE rb_reg_match_p(VALUE re, VALUE str, long pos);
bool rb_reg_start_with_p(VALUE re, VALUE str);
void rb_backref_set_string(VALUE string, long pos, long len);
void rb_match_unbusy(VALUE);
int rb_match_count(VALUE match);
int rb_match_nth_defined(int nth, VALUE match);
VALUE rb_reg_new_ary(VALUE ary, int options);
/* signal.c */
extern int ruby_enable_coredump;
int rb_get_next_signal(void);
/* string.c */
VALUE rb_fstring(VALUE);
VALUE rb_fstring_new(const char *ptr, long len);
#define rb_fstring_lit(str) rb_fstring_new((str), rb_strlen_lit(str))
#define rb_fstring_literal(str) rb_fstring_lit(str)
VALUE rb_fstring_cstr(const char *str);
#ifdef HAVE_BUILTIN___BUILTIN_CONSTANT_P
# define rb_fstring_cstr(str) RB_GNUC_EXTENSION_BLOCK( \
(__builtin_constant_p(str)) ? \
rb_fstring_new((str), (long)strlen(str)) : \
rb_fstring_cstr(str) \
)
#endif
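/* Sketch of the effect (illustration only; `runtime_ptr` is a made-up
 * variable): with a string-literal argument the macro above lets the
 * compiler fold strlen() and call rb_fstring_new() directly, otherwise the
 * out-of-line rb_fstring_cstr() is called:
 *
 *     VALUE a = rb_fstring_cstr("frozen");     // rb_fstring_new("frozen", 6)
 *     VALUE b = rb_fstring_cstr(runtime_ptr);  // real rb_fstring_cstr()
 *
 * The same __builtin_constant_p dispatch is used for rb_fstring_enc_cstr,
 * rb_sym_intern_cstr and rb_sym_intern_ascii_cstr below. */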
#ifdef RUBY_ENCODING_H
VALUE rb_fstring_enc_new(const char *ptr, long len, rb_encoding *enc);
#define rb_fstring_enc_lit(str, enc) rb_fstring_enc_new((str), rb_strlen_lit(str), (enc))
#define rb_fstring_enc_literal(str, enc) rb_fstring_enc_lit(str, enc)
VALUE rb_fstring_enc_cstr(const char *ptr, rb_encoding *enc);
# ifdef HAVE_BUILTIN___BUILTIN_CONSTANT_P
# define rb_fstring_enc_cstr(str, enc) RB_GNUC_EXTENSION_BLOCK( \
(__builtin_constant_p(str)) ? \
rb_fstring_enc_new((str), (long)strlen(str), (enc)) : \
rb_fstring_enc_cstr(str, enc) \
)
# endif
#endif
int rb_str_buf_cat_escaped_char(VALUE result, unsigned int c, int unicode_p);
int rb_str_symname_p(VALUE);
VALUE rb_str_quote_unprintable(VALUE);
VALUE rb_id_quote_unprintable(ID);
#define QUOTE(str) rb_str_quote_unprintable(str)
#define QUOTE_ID(id) rb_id_quote_unprintable(id)
char *rb_str_fill_terminator(VALUE str, const int termlen);
void rb_str_change_terminator_length(VALUE str, const int oldtermlen, const int termlen);
VALUE rb_str_locktmp_ensure(VALUE str, VALUE (*func)(VALUE), VALUE arg);
VALUE rb_str_chomp_string(VALUE str, VALUE chomp);
#ifdef RUBY_ENCODING_H
VALUE rb_external_str_with_enc(VALUE str, rb_encoding *eenc);
VALUE rb_str_cat_conv_enc_opts(VALUE newstr, long ofs, const char *ptr, long len,
rb_encoding *from, int ecflags, VALUE ecopts);
VALUE rb_enc_str_scrub(rb_encoding *enc, VALUE str, VALUE repl);
VALUE rb_str_initialize(VALUE str, const char *ptr, long len, rb_encoding *enc);
#endif
#define STR_NOEMBED FL_USER1
#define STR_SHARED FL_USER2 /* = ELTS_SHARED */
#define STR_EMBED_P(str) (!FL_TEST_RAW((str), STR_NOEMBED))
#define STR_SHARED_P(s) FL_ALL_RAW((s), STR_NOEMBED|ELTS_SHARED)
#define is_ascii_string(str) (rb_enc_str_coderange(str) == ENC_CODERANGE_7BIT)
#define is_broken_string(str) (rb_enc_str_coderange(str) == ENC_CODERANGE_BROKEN)
size_t rb_str_memsize(VALUE);
VALUE rb_sym_proc_call(ID mid, int argc, const VALUE *argv, int kw_splat, VALUE passed_proc);
VALUE rb_sym_to_proc(VALUE sym);
char *rb_str_to_cstr(VALUE str);
VALUE rb_str_eql(VALUE str1, VALUE str2);
VALUE rb_obj_as_string_result(VALUE str, VALUE obj);
const char *ruby_escaped_char(int c);
VALUE rb_str_opt_plus(VALUE, VALUE);
/* expect tail call optimization */
static inline VALUE
rb_str_eql_internal(const VALUE str1, const VALUE str2)
{
const long len = RSTRING_LEN(str1);
const char *ptr1, *ptr2;
if (len != RSTRING_LEN(str2)) return Qfalse;
if (!rb_str_comparable(str1, str2)) return Qfalse;
if ((ptr1 = RSTRING_PTR(str1)) == (ptr2 = RSTRING_PTR(str2)))
return Qtrue;
if (memcmp(ptr1, ptr2, len) == 0)
return Qtrue;
return Qfalse;
}
/* symbol.c */
#ifdef RUBY_ENCODING_H
VALUE rb_sym_intern(const char *ptr, long len, rb_encoding *enc);
VALUE rb_sym_intern_cstr(const char *ptr, rb_encoding *enc);
#ifdef __GNUC__
#define rb_sym_intern_cstr(ptr, enc) __extension__ ( \
{ \
(__builtin_constant_p(ptr)) ? \
rb_sym_intern((ptr), (long)strlen(ptr), (enc)) : \
rb_sym_intern_cstr((ptr), (enc)); \
})
#endif
#endif
VALUE rb_sym_intern_ascii(const char *ptr, long len);
VALUE rb_sym_intern_ascii_cstr(const char *ptr);
#ifdef __GNUC__
#define rb_sym_intern_ascii_cstr(ptr) __extension__ ( \
{ \
(__builtin_constant_p(ptr)) ? \
rb_sym_intern_ascii((ptr), (long)strlen(ptr)) : \
rb_sym_intern_ascii_cstr(ptr); \
})
#endif
VALUE rb_to_symbol_type(VALUE obj);
/* struct.c */
VALUE rb_struct_init_copy(VALUE copy, VALUE s);
VALUE rb_struct_lookup(VALUE s, VALUE idx);
VALUE rb_struct_s_keyword_init(VALUE klass);
/* time.c */
struct timeval rb_time_timeval(VALUE);
/* thread.c */
#define COVERAGE_INDEX_LINES 0
#define COVERAGE_INDEX_BRANCHES 1
#define COVERAGE_TARGET_LINES 1
#define COVERAGE_TARGET_BRANCHES 2
#define COVERAGE_TARGET_METHODS 4
#define COVERAGE_TARGET_ONESHOT_LINES 8
VALUE rb_obj_is_mutex(VALUE obj);
VALUE rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg);
void rb_thread_execute_interrupts(VALUE th);
VALUE rb_get_coverages(void);
int rb_get_coverage_mode(void);
VALUE rb_default_coverage(int);
VALUE rb_thread_shield_new(void);
VALUE rb_thread_shield_wait(VALUE self);
VALUE rb_thread_shield_release(VALUE self);
VALUE rb_thread_shield_destroy(VALUE self);
int rb_thread_to_be_killed(VALUE thread);
void rb_mutex_allow_trap(VALUE self, int val);
VALUE rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data);
VALUE rb_mutex_owned_p(VALUE self);
/* transcode.c */
extern VALUE rb_cEncodingConverter;
#ifdef RUBY_ENCODING_H
size_t rb_econv_memsize(rb_econv_t *);
#endif
/* us_ascii.c */
#ifdef RUBY_ENCODING_H
extern rb_encoding OnigEncodingUS_ASCII;
#endif
/* util.c */
char *ruby_dtoa(double d_, int mode, int ndigits, int *decpt, int *sign, char **rve);
char *ruby_hdtoa(double d, const char *xdigs, int ndigits, int *decpt, int *sign, char **rve);
/* utf_8.c */
#ifdef RUBY_ENCODING_H
extern rb_encoding OnigEncodingUTF_8;
#endif
/* variable.c */
#if USE_TRANSIENT_HEAP
#define ROBJECT_TRANSIENT_FLAG FL_USER13
#define ROBJ_TRANSIENT_P(obj) FL_TEST_RAW((obj), ROBJECT_TRANSIENT_FLAG)
#define ROBJ_TRANSIENT_SET(obj) FL_SET_RAW((obj), ROBJECT_TRANSIENT_FLAG)
#define ROBJ_TRANSIENT_UNSET(obj) FL_UNSET_RAW((obj), ROBJECT_TRANSIENT_FLAG)
#else
#define ROBJ_TRANSIENT_P(obj) 0
#define ROBJ_TRANSIENT_SET(obj) ((void)0)
#define ROBJ_TRANSIENT_UNSET(obj) ((void)0)
#endif
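/* The ROBJ_TRANSIENT_* helpers test/set/clear FL_USER13 on an object,
 * recording (when USE_TRANSIENT_HEAP is enabled) that its buffer currently
 * lives in the transient heap; with the feature disabled they compile away
 * to constants and no-ops. */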
void rb_gc_mark_global_tbl(void);
size_t rb_generic_ivar_memsize(VALUE);
VALUE rb_search_class_path(VALUE);
VALUE rb_attr_delete(VALUE, ID);
VALUE rb_ivar_lookup(VALUE obj, ID id, VALUE undef);
void rb_autoload_str(VALUE mod, ID id, VALUE file);
VALUE rb_autoload_at_p(VALUE, ID, int);
void rb_deprecate_constant(VALUE mod, const char *name);
NORETURN(VALUE rb_mod_const_missing(VALUE,VALUE));
rb_gvar_getter_t *rb_gvar_getter_function_of(const struct rb_global_entry *);
rb_gvar_setter_t *rb_gvar_setter_function_of(const struct rb_global_entry *);
bool rb_gvar_is_traced(const struct rb_global_entry *);
/* vm_insnhelper.h */
rb_serial_t rb_next_class_serial(void);
/* vm.c */
VALUE rb_obj_is_thread(VALUE obj);
void rb_vm_mark(void *ptr);
void Init_BareVM(void);
void Init_vm_objects(void);
PUREFUNC(VALUE rb_vm_top_self(void));
void rb_thread_recycle_stack_release(VALUE *);
VALUE *rb_thread_recycle_stack(size_t);
void rb_vm_change_state(void);
void rb_vm_inc_const_missing_count(void);
const void **rb_vm_get_insns_address_table(void);
VALUE rb_source_location(int *pline);
const char *rb_source_location_cstr(int *pline);
MJIT_STATIC void rb_vm_pop_cfunc_frame(void);
int rb_vm_add_root_module(ID id, VALUE module);
void rb_vm_check_redefinition_by_prepend(VALUE klass);
VALUE rb_yield_refine_block(VALUE refinement, VALUE refinements);
MJIT_STATIC VALUE ruby_vm_special_exception_copy(VALUE);
PUREFUNC(st_table *rb_vm_fstring_table(void));
/* vm_dump.c */
void rb_print_backtrace(void);
/* vm_eval.c */
void Init_vm_eval(void);
VALUE rb_adjust_argv_kw_splat(int *, const VALUE **, int *);
VALUE rb_current_realfilepath(void);
VALUE rb_check_block_call(VALUE, ID, int, const VALUE *, rb_block_call_func_t, VALUE);
typedef void rb_check_funcall_hook(int, VALUE, ID, int, const VALUE *, VALUE);
VALUE rb_check_funcall_with_hook(VALUE recv, ID mid, int argc, const VALUE *argv,
rb_check_funcall_hook *hook, VALUE arg);
VALUE rb_check_funcall_with_hook_kw(VALUE recv, ID mid, int argc, const VALUE *argv,
rb_check_funcall_hook *hook, VALUE arg, int kw_splat);
const char *rb_type_str(enum ruby_value_type type);
VALUE rb_check_funcall_default(VALUE, ID, int, const VALUE *, VALUE);
VALUE rb_check_funcall_default_kw(VALUE, ID, int, const VALUE *, VALUE, int);
VALUE rb_yield_1(VALUE val);
VALUE rb_yield_force_blockarg(VALUE values);
VALUE rb_lambda_call(VALUE obj, ID mid, int argc, const VALUE *argv,
rb_block_call_func_t bl_proc, int min_argc, int max_argc,
VALUE data2);
/* vm_insnhelper.c */
VALUE rb_equal_opt(VALUE obj1, VALUE obj2);
VALUE rb_eql_opt(VALUE obj1, VALUE obj2);
void Init_vm_stack_canary(void);
/* vm_method.c */
void Init_eval_method(void);
enum method_missing_reason {
MISSING_NOENTRY = 0x00,
MISSING_PRIVATE = 0x01,
MISSING_PROTECTED = 0x02,
MISSING_FCALL = 0x04,
MISSING_VCALL = 0x08,
MISSING_SUPER = 0x10,
MISSING_MISSING = 0x20,
MISSING_NONE = 0x40
};
struct rb_callable_method_entry_struct;
struct rb_method_definition_struct;
struct rb_execution_context_struct;
struct rb_control_frame_struct;
struct rb_calling_info;
struct rb_call_data;
struct rb_call_cache {
/* inline cache: keys */
rb_serial_t method_state;
rb_serial_t class_serial;
/* inline cache: values */
const struct rb_callable_method_entry_struct *me;
const struct rb_method_definition_struct *def;
VALUE (*call)(struct rb_execution_context_struct *ec,
struct rb_control_frame_struct *cfp,
struct rb_calling_info *calling,
struct rb_call_data *cd);
union {
unsigned int index; /* used by ivar */
enum method_missing_reason method_missing_reason; /* used by method_missing */
int inc_sp; /* used by cfunc */
} aux;
};
struct rb_call_info {
/* fixed at compile time */
ID mid;
unsigned int flag;
int orig_argc;
};
struct rb_call_data {
struct rb_call_cache cc;
struct rb_call_info ci;
};
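/* struct rb_call_data pairs the compile-time call info (ci: method id,
 * flags, argc) with its runtime inline cache (cc), which is keyed by
 * method_state/class_serial and holds the resolved method entry plus its
 * call handler. */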
RUBY_FUNC_EXPORTED
RUBY_FUNC_NONNULL(1, VALUE rb_funcallv_with_cc(struct rb_call_data*, VALUE, ID, int, const VALUE*));
#ifdef __GNUC__
# define rb_funcallv(recv, mid, argc, argv) \
__extension__({ \
static struct rb_call_data rb_funcallv_data = { { 0, }, { 0, }, }; \
rb_funcallv_with_cc(&rb_funcallv_data, recv, mid, argc, argv); \
})
#endif
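/* Under GCC/Clang the rb_funcallv() macro above gives every textual use its
 * own zero-initialized static struct rb_call_data, so rb_funcallv_with_cc()
 * can cache the method lookup per call site and reuse it while the cached
 * serials stay valid. */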
/* miniprelude.c, prelude.c */
void Init_prelude(void);
/* vm_backtrace.c */
void Init_vm_backtrace(void);
VALUE rb_vm_thread_backtrace(int argc, const VALUE *argv, VALUE thval);
VALUE rb_vm_thread_backtrace_locations(int argc, const VALUE *argv, VALUE thval);
VALUE rb_make_backtrace(void);
void rb_backtrace_print_as_bugreport(void);
int rb_backtrace_p(VALUE obj);
VALUE rb_backtrace_to_str_ary(VALUE obj);
VALUE rb_backtrace_to_location_ary(VALUE obj);
void rb_backtrace_each(VALUE (*iter)(VALUE recv, VALUE str), VALUE output);
RUBY_SYMBOL_EXPORT_BEGIN
const char *rb_objspace_data_type_name(VALUE obj);
/* Temporary. This API will be removed (renamed). */
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd);
/* array.c (export) */
void rb_ary_detransient(VALUE a);
VALUE *rb_ary_ptr_use_start(VALUE ary);
void rb_ary_ptr_use_end(VALUE ary);
/* bignum.c (export) */
VALUE rb_big_mul_normal(VALUE x, VALUE y);
VALUE rb_big_mul_balance(VALUE x, VALUE y);
VALUE rb_big_mul_karatsuba(VALUE x, VALUE y);
VALUE rb_big_mul_toom3(VALUE x, VALUE y);
VALUE rb_big_sq_fast(VALUE x);
VALUE rb_big_divrem_normal(VALUE x, VALUE y);
VALUE rb_big2str_poweroftwo(VALUE x, int base);
VALUE rb_big2str_generic(VALUE x, int base);
VALUE rb_str2big_poweroftwo(VALUE arg, int base, int badcheck);
VALUE rb_str2big_normal(VALUE arg, int base, int badcheck);
VALUE rb_str2big_karatsuba(VALUE arg, int base, int badcheck);
#if defined(HAVE_LIBGMP) && defined(HAVE_GMP_H)
VALUE rb_big_mul_gmp(VALUE x, VALUE y);
VALUE rb_big_divrem_gmp(VALUE x, VALUE y);
VALUE rb_big2str_gmp(VALUE x, int base);
VALUE rb_str2big_gmp(VALUE arg, int base, int badcheck);
#endif
enum rb_int_parse_flags {
RB_INT_PARSE_SIGN = 0x01,
RB_INT_PARSE_UNDERSCORE = 0x02,
RB_INT_PARSE_PREFIX = 0x04,
RB_INT_PARSE_ALL = 0x07,
RB_INT_PARSE_DEFAULT = 0x07
};
VALUE rb_int_parse_cstr(const char *str, ssize_t len, char **endp, size_t *ndigits, int base, int flags);
/* enumerator.c (export) */
VALUE rb_arith_seq_new(VALUE obj, VALUE meth, int argc, VALUE const *argv,
rb_enumerator_size_func *size_fn,
VALUE beg, VALUE end, VALUE step, int excl);
/* error.c (export) */
int rb_bug_reporter_add(void (*func)(FILE *, void *), void *data);
NORETURN(void rb_unexpected_type(VALUE,int));
#undef Check_Type
#define Check_Type(v, t) \
(!RB_TYPE_P((VALUE)(v), (t)) || \
((t) == RUBY_T_DATA && RTYPEDDATA_P(v)) ? \
rb_unexpected_type((VALUE)(v), (t)) : (void)0)
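/* Note: this Check_Type also raises for T_DATA objects that are typed data
 * (RTYPEDDATA_P); those are expected to be checked with the TypedData
 * helpers (e.g. Check_TypedStruct) rather than a bare T_DATA check. */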
static inline int
rb_typeddata_is_instance_of_inline(VALUE obj, const rb_data_type_t *data_type)
{
return RB_TYPE_P(obj, T_DATA) && RTYPEDDATA_P(obj) && (RTYPEDDATA_TYPE(obj) == data_type);
}
#define rb_typeddata_is_instance_of rb_typeddata_is_instance_of_inline
/* file.c (export) */
#if defined HAVE_READLINK && defined RUBY_ENCODING_H
VALUE rb_readlink(VALUE path, rb_encoding *enc);
#endif
#ifdef __APPLE__
VALUE rb_str_normalize_ospath(const char *ptr, long len);
#endif
/* hash.c (export) */
VALUE rb_hash_delete_entry(VALUE hash, VALUE key);
VALUE rb_ident_hash_new(void);
/* io.c (export) */
void rb_maygvl_fd_fix_cloexec(int fd);
int rb_gc_for_fd(int err);
void rb_write_error_str(VALUE mesg);
/* numeric.c (export) */
VALUE rb_int_positive_pow(long x, unsigned long y);
/* object.c (export) */
int rb_opts_exception_p(VALUE opts, int default_value);
/* process.c (export) */
int rb_exec_async_signal_safe(const struct rb_execarg *e, char *errmsg, size_t errmsg_buflen);
rb_pid_t rb_fork_async_signal_safe(int *status, int (*chfunc)(void*, char *, size_t), void *charg, VALUE fds, char *errmsg, size_t errmsg_buflen);
VALUE rb_execarg_new(int argc, const VALUE *argv, int accept_shell, int allow_exc_opt);
struct rb_execarg *rb_execarg_get(VALUE execarg_obj); /* dangerous. needs GC guard. */
int rb_execarg_addopt(VALUE execarg_obj, VALUE key, VALUE val);
void rb_execarg_parent_start(VALUE execarg_obj);
void rb_execarg_parent_end(VALUE execarg_obj);
int rb_execarg_run_options(const struct rb_execarg *e, struct rb_execarg *s, char* errmsg, size_t errmsg_buflen);
VALUE rb_execarg_extract_options(VALUE execarg_obj, VALUE opthash);
void rb_execarg_setenv(VALUE execarg_obj, VALUE env);
/* rational.c (export) */
VALUE rb_gcd(VALUE x, VALUE y);
VALUE rb_gcd_normal(VALUE self, VALUE other);
#if defined(HAVE_LIBGMP) && defined(HAVE_GMP_H)
VALUE rb_gcd_gmp(VALUE x, VALUE y);
#endif
/* signal.c (export) */
int rb_grantpt(int fd);
/* string.c (export) */
VALUE rb_str_tmp_frozen_acquire(VALUE str);
void rb_str_tmp_frozen_release(VALUE str, VALUE tmp);
#ifdef RUBY_ENCODING_H
/* internal use */
VALUE rb_setup_fake_str(struct RString *fake_str, const char *name, long len, rb_encoding *enc);
#endif
VALUE rb_str_upto_each(VALUE, VALUE, int, int (*each)(VALUE, VALUE), VALUE);
VALUE rb_str_upto_endless_each(VALUE, int (*each)(VALUE, VALUE), VALUE);
/* thread.c (export) */
int ruby_thread_has_gvl_p(void); /* for ext/fiddle/closure.c */
/* time.c (export) */
void ruby_reset_leap_second_info(void);
/* util.c (export) */
extern const signed char ruby_digit36_to_number_table[];
extern const char ruby_hexdigits[];
extern unsigned long ruby_scan_digits(const char *str, ssize_t len, int base, size_t *retlen, int *overflow);
/* variable.c (export) */
void rb_mark_generic_ivar(VALUE);
void rb_mv_generic_ivar(VALUE src, VALUE dst);
VALUE rb_const_missing(VALUE klass, VALUE name);
int rb_class_ivar_set(VALUE klass, ID vid, VALUE value);
void rb_iv_tbl_copy(VALUE dst, VALUE src);
/* gc.c (export) */
VALUE rb_wb_protected_newobj_of(VALUE, VALUE);
VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE);
size_t rb_obj_memsize_of(VALUE);
void rb_gc_verify_internal_consistency(void);
#define RB_OBJ_GC_FLAGS_MAX 6
size_t rb_obj_gc_flags(VALUE, ID[], size_t);
void rb_gc_mark_values(long n, const VALUE *values);
void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
#if IMEMO_DEBUG
VALUE rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line);
#define rb_imemo_new(type, v1, v2, v3, v0) rb_imemo_new_debug(type, v1, v2, v3, v0, __FILE__, __LINE__)
#else
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0);
#endif
/* random.c */
int ruby_fill_random_bytes(void *, size_t, int);
RUBY_SYMBOL_EXPORT_END
#define RUBY_DTRACE_CREATE_HOOK(name, arg) \
RUBY_DTRACE_HOOK(name##_CREATE, arg)
#define RUBY_DTRACE_HOOK(name, arg) \
do { \
if (UNLIKELY(RUBY_DTRACE_##name##_ENABLED())) { \
int dtrace_line; \
const char *dtrace_file = rb_source_location_cstr(&dtrace_line); \
if (!dtrace_file) dtrace_file = ""; \
RUBY_DTRACE_##name(arg, dtrace_file, dtrace_line); \
} \
} while (0)
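/* RUBY_DTRACE_HOOK(NAME, arg) fires RUBY_DTRACE_<NAME>(arg, file, line) with
 * the current Ruby source location, but only if the corresponding
 * RUBY_DTRACE_<NAME>_ENABLED() check passes; RUBY_DTRACE_CREATE_HOOK(FOO, arg)
 * is shorthand for the FOO_CREATE probe (FOO being a placeholder here). */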
#define RB_OBJ_BUILTIN_TYPE(obj) rb_obj_builtin_type(obj)
#define OBJ_BUILTIN_TYPE(obj) RB_OBJ_BUILTIN_TYPE(obj)
#ifdef __GNUC__
#define rb_obj_builtin_type(obj) \
__extension__({ \
VALUE arg_obj = (obj); \
RB_SPECIAL_CONST_P(arg_obj) ? -1 : \
RB_BUILTIN_TYPE(arg_obj); \
})
#else
static inline int
rb_obj_builtin_type(VALUE obj)
{
return RB_SPECIAL_CONST_P(obj) ? -1 :
RB_BUILTIN_TYPE(obj);
}
#endif
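/* rb_obj_builtin_type() returns -1 for special constants (which have no
 * RBasic flags to inspect) and RB_BUILTIN_TYPE() otherwise; both forms
 * evaluate their argument only once. */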
/* A macro for defining a flexible array, like: VALUE ary[FLEX_ARY_LEN]; */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define FLEX_ARY_LEN /* VALUE ary[]; */
#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
# define FLEX_ARY_LEN 0 /* VALUE ary[0]; */
#else
# define FLEX_ARY_LEN 1 /* VALUE ary[1]; */
#endif
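/* Illustrative use (hypothetical struct), assuming the allocation reserves
 * room for the trailing elements:
 *
 *   struct flex_example {
 *       long len;
 *       VALUE ary[FLEX_ARY_LEN];
 *   };
 *   ... = malloc(offsetof(struct flex_example, ary) + n * sizeof(VALUE));
 */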
/*
* For declaring bitfields out of non-unsigned int types:
* struct date {
* BITFIELD(enum months, month, 4);
* ...
* };
*/
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define BITFIELD(type, name, size) type name : size
#else
# define BITFIELD(type, name, size) unsigned int name : size
#endif
#if defined(_MSC_VER)
# define COMPILER_WARNING_PUSH __pragma(warning(push))
# define COMPILER_WARNING_POP __pragma(warning(pop))
# define COMPILER_WARNING_ERROR(flag) __pragma(warning(error: flag))
# define COMPILER_WARNING_IGNORED(flag) __pragma(warning(suppress: flag))
#elif defined(__clang__) /* clang 2.6 already had this feature */
# define COMPILER_WARNING_PUSH _Pragma("clang diagnostic push")
# define COMPILER_WARNING_POP _Pragma("clang diagnostic pop")
# define COMPILER_WARNING_SPECIFIER(kind, msg) \
clang diagnostic kind # msg
# define COMPILER_WARNING_ERROR(flag) \
COMPILER_WARNING_PRAGMA(COMPILER_WARNING_SPECIFIER(error, flag))
# define COMPILER_WARNING_IGNORED(flag) \
COMPILER_WARNING_PRAGMA(COMPILER_WARNING_SPECIFIER(ignored, flag))
#elif GCC_VERSION_SINCE(4, 6, 0)
/* https://gcc.gnu.org/onlinedocs/gcc-4.6.4/gcc/Diagnostic-Pragmas.html */
# define COMPILER_WARNING_PUSH _Pragma("GCC diagnostic push")
# define COMPILER_WARNING_POP _Pragma("GCC diagnostic pop")
# define COMPILER_WARNING_SPECIFIER(kind, msg) \
GCC diagnostic kind # msg
# define COMPILER_WARNING_ERROR(flag) \
COMPILER_WARNING_PRAGMA(COMPILER_WARNING_SPECIFIER(error, flag))
# define COMPILER_WARNING_IGNORED(flag) \
COMPILER_WARNING_PRAGMA(COMPILER_WARNING_SPECIFIER(ignored, flag))
#else /* other compilers to follow? */
# define COMPILER_WARNING_PUSH /* nop */
# define COMPILER_WARNING_POP /* nop */
# define COMPILER_WARNING_ERROR(flag) /* nop */
# define COMPILER_WARNING_IGNORED(flag) /* nop */
#endif
#define COMPILER_WARNING_PRAGMA(str) COMPILER_WARNING_PRAGMA_(str)
#define COMPILER_WARNING_PRAGMA_(str) _Pragma(#str)
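/* Illustrative use of the warning-control macros above (hypothetical
 * declaration and flag):
 *
 *   COMPILER_WARNING_PUSH;
 *   COMPILER_WARNING_IGNORED(-Wunused-function);
 *   static void some_helper(void);
 *   COMPILER_WARNING_POP;
 *
 * On compilers without pragma support these expand to nothing. */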
#if defined(USE_UNALIGNED_MEMBER_ACCESS) && USE_UNALIGNED_MEMBER_ACCESS && \
(defined(__clang__) || GCC_VERSION_SINCE(9, 0, 0))
# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \
COMPILER_WARNING_PUSH; \
COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
typeof(expr) unaligned_member_access_result = (expr); \
COMPILER_WARNING_POP; \
unaligned_member_access_result; \
})
#else
# define UNALIGNED_MEMBER_ACCESS(expr) expr
#endif
#define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem)
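/* UNALIGNED_MEMBER_PTR(ptr, mem) takes &(ptr)->mem while suppressing the
 * -Waddress-of-packed-member diagnostic on the compilers handled above; on
 * other configurations it is a plain member access. */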
#undef RB_OBJ_WRITE
#define RB_OBJ_WRITE(a, slot, b) UNALIGNED_MEMBER_ACCESS(rb_obj_write((VALUE)(a), (VALUE *)(slot), (VALUE)(b), __FILE__, __LINE__))
#if defined(__cplusplus)
#if 0
{ /* satisfy cc-mode */
#endif
} /* extern "C" { */
#endif
#endif /* RUBY_INTERNAL_H */