#ifndef INTERNAL_GC_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_GC_H
/**
 * @author     Ruby developers <ruby-core@ruby-lang.org>
 * @copyright  This file is a part of the programming language Ruby.
 *             Permission is hereby granted, to either redistribute and/or
 *             modify this file, provided that the conditions mentioned in the
 *             file COPYING are met.  Consult the file for details.
 * @brief      Internal header for GC.
*/
#include "ruby/internal/config.h"

#include <stddef.h>             /* for size_t */

#include "internal/compilers.h" /* for __has_attribute */
#include "ruby/ruby.h"          /* for rb_event_flag_t */
#include "vm_core.h"            /* for GET_EC() */
#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p)))
#elif defined(__i386) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && !defined(_AIX) && !defined(__APPLE__) // Not Apple is NEEDED to unbreak ppc64 build on Darwin. Don't ask.
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr\t%0, %%r1" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && defined(_AIX)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr %0,1" : "=r" (*(p)))
#elif defined(__POWERPC__) && defined(__APPLE__) // Darwin ppc and ppc64
#define SET_MACHINE_STACK_END(p) __asm__ volatile("mr %0, r1" : "=r" (*(p)))
#elif defined(__aarch64__) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mov\t%0, sp" : "=r" (*(p)))
#else
NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p)
#define USE_CONSERVATIVE_STACK_END
#endif
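
/* Usage sketch (illustrative, not part of this header): the conservative GC
 * records the current stack pointer so it knows how far to scan the machine
 * stack for object references:
 *
 *     VALUE *stack_end;
 *     SET_MACHINE_STACK_END(&stack_end);
 */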
/* for GC debug */
#ifndef RUBY_MARK_FREE_DEBUG
# define RUBY_MARK_FREE_DEBUG 0
#endif
#if RUBY_MARK_FREE_DEBUG
extern int ruby_gc_debug_indent;

static inline void
rb_gc_debug_indent(void)
{
    ruby_debug_printf("%*s", ruby_gc_debug_indent, "");
}

static inline void
rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr)
{
    if (st == 0) {
        ruby_gc_debug_indent--;
    }
    rb_gc_debug_indent();
    ruby_debug_printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr);

    if (st) {
        ruby_gc_debug_indent++;
    }

    fflush(stdout);
}
#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr)
#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr)
#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr)
#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr)
#define RUBY_GC_INFO rb_gc_debug_indent(), ruby_debug_printf
#else
#define RUBY_MARK_ENTER(msg)
#define RUBY_MARK_LEAVE(msg)
#define RUBY_FREE_ENTER(msg)
#define RUBY_FREE_LEAVE(msg)
#define RUBY_GC_INFO if(0)printf
#endif
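
/* Usage sketch (illustrative): when RUBY_MARK_FREE_DEBUG is enabled, the
 * ENTER/LEAVE macros expand to calls that reference a local variable named
 * ptr, so the enclosing function must have one in scope (foo_mark is
 * hypothetical):
 *
 *     static void
 *     foo_mark(void *ptr)
 *     {
 *         RUBY_MARK_ENTER("foo");
 *         // ... mark the references held by ptr ...
 *         RUBY_MARK_LEAVE("foo");
 *     }
 */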
#define RUBY_MARK_MOVABLE_UNLESS_NULL(ptr) do { \
    VALUE markobj = (ptr); \
    if (RTEST(markobj)) { rb_gc_mark_movable(markobj); } \
} while (0)
#define RUBY_MARK_UNLESS_NULL(ptr) do { \
    VALUE markobj = (ptr); \
    if (RTEST(markobj)) { rb_gc_mark(markobj); } \
} while (0)
#define RUBY_FREE_UNLESS_NULL(ptr) if(ptr){ruby_xfree(ptr);(ptr)=NULL;}
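
/* Usage sketch (illustrative): these guards are convenient in mark/free
 * callbacks where a slot may still be 0 (struct foo and its fields are
 * hypothetical):
 *
 *     RUBY_MARK_UNLESS_NULL(foo->name);          // pinning mark
 *     RUBY_MARK_MOVABLE_UNLESS_NULL(foo->value); // lets compaction move it
 *     RUBY_FREE_UNLESS_NULL(foo->buffer);        // xfree, then NULL the slot
 */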
#if STACK_GROW_DIRECTION > 0
# define STACK_UPPER(x, a, b) (a)
#elif STACK_GROW_DIRECTION < 0
# define STACK_UPPER(x, a, b) (b)
#else
RUBY_EXTERN int ruby_stack_grow_direction;
int ruby_get_stack_grow_direction(volatile VALUE *addr);
# define stack_growup_p(x) ( \
        (ruby_stack_grow_direction ? \
         ruby_stack_grow_direction : \
         ruby_get_stack_grow_direction(x)) > 0)
# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? (a) : (b))
#endif
/*
  STACK_GROW_DIR_DETECTION is used with STACK_DIR_UPPER.

  On most normal systems, stacks grow from a high address to a lower address.
  In this case, STACK_DIR_UPPER(a, b) will return (b), but on exotic systems
  where the stack grows UP (from a low address to a high address), it will
  return (a).
*/
#if STACK_GROW_DIRECTION
#define STACK_GROW_DIR_DETECTION
#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b))
#else
#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b))
#endif
#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0)
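
/* Usage sketch (illustrative): when STACK_GROW_DIRECTION is unknown at
 * compile time, STACK_GROW_DIR_DETECTION declares the probe variable whose
 * address the runtime check takes:
 *
 *     STACK_GROW_DIR_DETECTION;
 *     if (IS_STACK_DIR_UPPER()) {
 *         // the machine stack grows toward higher addresses here
 *     }
 */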

const char *rb_obj_info(VALUE obj);
const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);

size_t rb_size_pool_slot_size(unsigned char pool_id);

struct rb_execution_context_struct; /* in vm_core.h */
struct rb_objspace; /* in vm_core.h */
#ifdef NEWOBJ_OF
# undef NEWOBJ_OF
# undef RB_NEWOBJ_OF
#endif
#define NEWOBJ_OF_0(var, T, c, f, s, ec) \
    T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
                     rb_wb_protected_newobj_of(GET_EC(), (c), (f) & ~FL_WB_PROTECTED, s) : \
                     rb_wb_unprotected_newobj_of((c), (f), s))
#define NEWOBJ_OF_ec(var, T, c, f, s, ec) \
    T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
                     rb_wb_protected_newobj_of((ec), (c), (f) & ~FL_WB_PROTECTED, s) : \
                     rb_wb_unprotected_newobj_of((c), (f), s))

#define NEWOBJ_OF(var, T, c, f, s, ec) \
    NEWOBJ_OF_HELPER(ec)(var, T, c, f, s, ec)

#define NEWOBJ_OF_HELPER(ec) NEWOBJ_OF_ ## ec
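
/* Dispatch note: NEWOBJ_OF pastes the literal spelling of its last argument
 * onto NEWOBJ_OF_, so callers must pass either the literal token 0 (selecting
 * NEWOBJ_OF_0, which fetches the context via GET_EC()) or a variable spelled
 * exactly ec (selecting NEWOBJ_OF_ec). Illustrative expansion, with a
 * hypothetical struct RFoo:
 *
 *     NEWOBJ_OF(obj, struct RFoo, klass, flags, size, 0)
 *     // => NEWOBJ_OF_0(obj, struct RFoo, klass, flags, size, 0)
 */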
#define RB_OBJ_GC_FLAGS_MAX 6 /* used in ext/objspace */

#ifndef USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! (__has_warning("-Waddress-of-packed-member") || GCC_VERSION_SINCE(9, 0, 0))
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#else
# include "internal/warnings.h"
# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    __typeof__(expr) unaligned_member_access_result = (expr); \
    COMPILER_WARNING_POP; \
    unaligned_member_access_result; \
})

# define UNALIGNED_MEMBER_PTR(ptr, mem) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    const volatile void *unaligned_member_ptr_result = &(ptr)->mem; \
    COMPILER_WARNING_POP; \
    (__typeof__((ptr)->mem) *)unaligned_member_ptr_result; \
})
#endif

#ifndef UNALIGNED_MEMBER_PTR
# define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem)
#endif
#define RB_OBJ_WRITE_UNALIGNED(old, slot, young) do { \
    VALUE *_slot = UNALIGNED_MEMBER_ACCESS(slot); \
    RB_OBJ_WRITE(old, _slot, young); \
} while (0)
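
/* Usage sketch (illustrative): store a VALUE into a member of a packed struct
 * without tripping -Waddress-of-packed-member, while still firing the write
 * barrier (struct packed_node and its fields are hypothetical):
 *
 *     struct packed_node *node = DATA_PTR(parent);
 *     RB_OBJ_WRITE_UNALIGNED(parent, &node->value, child);
 */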

// We use SIZE_POOL_COUNT number of shape IDs for transitions out of different size pools
// The next available shape ID will be the SPECIAL_CONST_SHAPE_ID
#ifndef SIZE_POOL_COUNT
# define SIZE_POOL_COUNT 5
#endif
/* Used in places that could malloc during GC, which can cause the GC to run.
 * We need to temporarily disable the GC to allow the malloc to happen.
 * Allocating memory during GC is a bad idea, so use this only when absolutely
 * necessary. */
#define DURING_GC_COULD_MALLOC_REGION_START() \
    assert(rb_during_gc()); \
    VALUE _already_disabled = rb_gc_disable_no_rest()

#define DURING_GC_COULD_MALLOC_REGION_END() \
    if (_already_disabled == Qfalse) rb_gc_enable()
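
/* Usage sketch (illustrative): bracket an allocation made while the GC is
 * running. START declares a local, so the pair must sit in the same scope
 * (tbl is hypothetical):
 *
 *     DURING_GC_COULD_MALLOC_REGION_START();
 *     {
 *         tbl = st_init_numtable(); // may malloc
 *     }
 *     DURING_GC_COULD_MALLOC_REGION_END();
 */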

typedef struct ractor_newobj_size_pool_cache {
    struct RVALUE *freelist;
    struct heap_page *using_page;
} rb_ractor_newobj_size_pool_cache_t;

typedef struct ractor_newobj_cache {
    size_t incremental_mark_step_allocated_slots;
    rb_ractor_newobj_size_pool_cache_t size_pool_caches[SIZE_POOL_COUNT];
} rb_ractor_newobj_cache_t;

/* gc.c */
extern VALUE *ruby_initial_gc_stress_ptr;
extern int ruby_disable_gc;
RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
void ruby_mimfree(void *ptr);
void rb_gc_prepare_heap(void);
void rb_objspace_set_event_hook(const rb_event_flag_t event);
VALUE rb_objspace_gc_enable(struct rb_objspace *);
VALUE rb_objspace_gc_disable(struct rb_objspace *);
void ruby_gc_set_params(void);
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj);
#if __has_attribute(alloc_align)
__attribute__((__alloc_align__(1)))
#endif
RUBY_ATTR_MALLOC void *rb_aligned_malloc(size_t, size_t) RUBY_ATTR_ALLOC_SIZE((2));
size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */

/* Grows a malloc'ed capacity to the next size that is optimal for the
 * underlying allocator. The default growth sequence is 3 (embed), 8, 16, 32,
 * 64, ... but since powers of two aren't the best size for every allocator,
 * when `malloc_usable_size` is available it is used to discover the best
 * offset: on Linux/glibc 2.35, for instance, the sequence becomes 3 (embed),
 * 7, 15, 31, ... to avoid wasting 8 bytes per allocation (jemalloc wastes
 * nothing either way). */
size_t rb_malloc_grow_capa(size_t current_capacity, size_t type_size);
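
/* Usage sketch (illustrative): growing a heap-allocated VALUE buffer, where
 * capa, len and list are hypothetical fields of some container:
 *
 *     if (len == capa) {
 *         capa = rb_malloc_grow_capa(capa, sizeof(VALUE));
 *         list = ruby_xrealloc2(list, capa, sizeof(VALUE));
 *     }
 */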
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add(size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add(size_t, size_t, size_t);
void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
static inline void *ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
VALUE rb_class_allocate_instance(VALUE klass);
void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
size_t rb_gc_obj_slot_size(VALUE obj);
bool rb_gc_size_allocatable_p(size_t size);
int rb_objspace_garbage_object_p(VALUE obj);
bool rb_gc_is_ptr_to_obj(void *ptr);
VALUE rb_gc_id2ref_obj_tbl(VALUE objid);
VALUE rb_define_finalizer_no_check(VALUE obj, VALUE block);

void rb_gc_mark_and_move(VALUE *ptr);

void rb_gc_mark_weak(VALUE *ptr);
void rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr);

#define rb_gc_mark_and_move_ptr(ptr) do { \
    VALUE _obj = (VALUE)*(ptr); \
    rb_gc_mark_and_move(&_obj); \
    if (_obj != (VALUE)*(ptr)) *(ptr) = (void *)_obj; \
} while (0)
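
/* Usage sketch (illustrative): a compaction-aware mark function can use
 * rb_gc_mark_and_move to both mark a reference and update it when the
 * referent has moved, so one callback can serve marking and reference
 * updating (struct foo is hypothetical):
 *
 *     static void
 *     foo_mark_and_move(void *ptr)
 *     {
 *         struct foo *f = ptr;
 *         rb_gc_mark_and_move(&f->obj);
 *     }
 */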
RUBY_SYMBOL_EXPORT_BEGIN
/* exports for objspace module */
size_t rb_objspace_data_type_memsize(VALUE obj);
void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
int rb_objspace_markable_object_p(VALUE obj);
int rb_objspace_internal_object_p(VALUE obj);
int rb_objspace_marked_object_p(VALUE obj);
void rb_objspace_each_objects(
    int (*callback)(void *start, void *end, size_t stride, void *data),
    void *data);
void rb_objspace_each_objects_without_setup(
    int (*callback)(void *, void *, size_t, void *),
    void *data);
size_t rb_gc_obj_slot_size(VALUE obj);
VALUE rb_gc_disable_no_rest(void);

/* gc.c (export) */
const char *rb_objspace_data_type_name(VALUE obj);
VALUE rb_wb_protected_newobj_of(struct rb_execution_context_struct *, VALUE, VALUE, size_t);
VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, size_t);
size_t rb_obj_memsize_of(VALUE);
void rb_gc_verify_internal_consistency(void);
size_t rb_obj_gc_flags(VALUE, ID[], size_t);
void rb_gc_mark_values(long n, const VALUE *values);
void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
void rb_gc_update_values(long n, VALUE *values);
void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
void ruby_sized_xfree(void *x, size_t size);
RUBY_SYMBOL_EXPORT_END

int rb_ec_stack_check(struct rb_execution_context_struct *ec);
void rb_gc_writebarrier_remember(VALUE obj);
const char *rb_obj_info(VALUE obj);

#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)
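/* When the allocator can report a chunk's usable size (malloc_usable_size on
 * glibc, malloc_size on Darwin, _msize on Windows), the caller-supplied
 * old-size hint is not needed, so the sized variants below simply defer to
 * the plain ruby_xrealloc/ruby_xfree family. (This rationale is an editorial
 * reading of the two branches, not wording from the original header.) */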
static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_xrealloc(ptr, new_size);
}

static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, elemsiz);
}

static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_xfree(ptr);
}

# define SIZED_REALLOC_N(x, y, z, w) REALLOC_N(x, y, z)

static inline void *
ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, element_size);
}

#else

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_sized_xrealloc(ptr, new_size, old_size);
}

static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, elemsiz, old_count);
}

static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_sized_xfree(ptr, size);
}

# define SIZED_REALLOC_N(v, T, m, n) \
    ((v) = (T *)ruby_sized_xrealloc2((void *)(v), (m), sizeof(T), (n)))

static inline void *
ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, element_size, old_count);
}
#endif /* HAVE_MALLOC_USABLE_SIZE */

#define ruby_sized_xrealloc ruby_sized_xrealloc_inlined
#define ruby_sized_xrealloc2 ruby_sized_xrealloc2_inlined
#define ruby_sized_xfree ruby_sized_xfree_inlined

#endif /* INTERNAL_GC_H */