Mirror of https://github.com/github/ruby.git
Change darray size to size_t and add functions that use GC malloc
Changes the size and capacity of darray to size_t to support more elements. Adds darray functions that use the GC allocation functions.
Parent: f9abb286fb
Commit: 71afa8164d
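For orientation (an editorial addition, not part of the commit): a minimal usage sketch of the GC-backed darray variants introduced here. It assumes darray.h and the Ruby internal allocators are available; the element type and values are illustrative.

    #include "darray.h"

    static void
    darray_gc_example(void)
    {
        rb_darray(int) offsets = NULL;

        // Backing buffer is allocated and zeroed with ruby_xcalloc.
        rb_darray_make_with_gc(&offsets, 4);

        // Growth goes through ruby_xrealloc; there is no success flag to
        // check anymore, since allocation failure now ends in rb_bug.
        rb_darray_append_with_gc(&offsets, 42);

        int first = rb_darray_get(offsets, 0);
        (void)first;

        // Backing buffer is released through ruby_sized_xfree.
        rb_darray_free_with_gc(offsets);
    }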
darray.h: 128 changed lines
@@ -13,7 +13,7 @@
 //
 // Example:
 //     rb_darray(char) char_array = NULL;
-//     if (!rb_darray_append(&char_array, 'e')) abort();
+//     rb_darray_append(&char_array, 'e');
 //     printf("pushed %c\n", *rb_darray_ref(char_array, 0));
 //     rb_darray_free(char_array);
 //
@@ -21,35 +21,46 @@

 // Copy an element out of the array. Warning: not bounds checked.
 //
-//  T rb_darray_get(rb_darray(T) ary, int32_t idx);
+//  T rb_darray_get(rb_darray(T) ary, size_t idx);
 //
 #define rb_darray_get(ary, idx) ((ary)->data[(idx)])

 // Assign to an element. Warning: not bounds checked.
 //
-//  void rb_darray_set(rb_darray(T) ary, int32_t idx, T element);
+//  void rb_darray_set(rb_darray(T) ary, size_t idx, T element);
 //
 #define rb_darray_set(ary, idx, element) ((ary)->data[(idx)] = (element))

 // Get a pointer to an element. Warning: not bounds checked.
 //
-//  T *rb_darray_ref(rb_darray(T) ary, int32_t idx);
+//  T *rb_darray_ref(rb_darray(T) ary, size_t idx);
 //
 #define rb_darray_ref(ary, idx) (&((ary)->data[(idx)]))

-// Copy a new element into the array. Return 1 on success and 0 on failure.
-// ptr_to_ary is evaluated multiple times.
+// Copy a new element into the array. ptr_to_ary is evaluated multiple times.
 //
-//  bool rb_darray_append(rb_darray(T) *ptr_to_ary, T element);
+//  void rb_darray_append(rb_darray(T) *ptr_to_ary, T element);
 //
-#define rb_darray_append(ptr_to_ary, element) ( \
-    rb_darray_ensure_space((ptr_to_ary), sizeof(**(ptr_to_ary)), sizeof((*(ptr_to_ary))->data[0])) ? ( \
-        rb_darray_set(*(ptr_to_ary), \
-                      (*(ptr_to_ary))->meta.size, \
-                      (element)), \
-        ++((*(ptr_to_ary))->meta.size), \
-        1 \
-    ) : 0)
+// TODO: replace this with rb_darray_append_with_gc when YJIT moves to Rust.
+//
+#define rb_darray_append(ptr_to_ary, element) do { \
+    rb_darray_ensure_space((ptr_to_ary), sizeof(**(ptr_to_ary)), \
+                           sizeof((*(ptr_to_ary))->data[0]), realloc); \
+    rb_darray_set(*(ptr_to_ary), \
+                  (*(ptr_to_ary))->meta.size, \
+                  (element)); \
+    (*(ptr_to_ary))->meta.size++; \
+} while (0)
+
+#define rb_darray_append_with_gc(ptr_to_ary, element) do { \
+    rb_darray_ensure_space((ptr_to_ary), sizeof(**(ptr_to_ary)), \
+                           sizeof((*(ptr_to_ary))->data[0]), ruby_xrealloc); \
+    rb_darray_set(*(ptr_to_ary), \
+                  (*(ptr_to_ary))->meta.size, \
+                  (element)); \
+    (*(ptr_to_ary))->meta.size++; \
+} while (0)
+

 // Last element of the array
 //
@@ -68,21 +79,30 @@
 // Iterate over items of the array in a for loop
 //
 #define rb_darray_foreach(ary, idx_name, elem_ptr_var) \
-    for (int idx_name = 0; idx_name < rb_darray_size(ary) && ((elem_ptr_var) = rb_darray_ref(ary, idx_name)); ++idx_name)
+    for (size_t idx_name = 0; idx_name < rb_darray_size(ary) && ((elem_ptr_var) = rb_darray_ref(ary, idx_name)); ++idx_name)

 // Iterate over valid indicies in the array in a for loop
 //
 #define rb_darray_for(ary, idx_name) \
-    for (int idx_name = 0; idx_name < rb_darray_size(ary); ++idx_name)
+    for (size_t idx_name = 0; idx_name < rb_darray_size(ary); ++idx_name)

 // Make a dynamic array of a certain size. All bytes backing the elements are set to zero.
-// Return 1 on success and 0 on failure.
 //
 // Note that NULL is a valid empty dynamic array.
 //
-//  bool rb_darray_make(rb_darray(T) *ptr_to_ary, int32_t size);
+//  void rb_darray_make(rb_darray(T) *ptr_to_ary, size_t size);
 //
-#define rb_darray_make(ptr_to_ary, size) rb_darray_make_impl((ptr_to_ary), size, sizeof(**(ptr_to_ary)), sizeof((*(ptr_to_ary))->data[0]))
+// TODO: replace this with rb_darray_make_with_gc with YJIT moves to Rust.
+//
+#define rb_darray_make(ptr_to_ary, size) \
+    rb_darray_make_impl((ptr_to_ary), size, sizeof(**(ptr_to_ary)), \
+                        sizeof((*(ptr_to_ary))->data[0]), calloc)
+
+
+#define rb_darray_make_with_gc(ptr_to_ary, size) \
+    rb_darray_make_impl((ptr_to_ary), size, sizeof(**(ptr_to_ary)), \
+                        sizeof((*(ptr_to_ary))->data[0]), ruby_xcalloc)

 // Set the size of the array to zero without freeing the backing memory.
 // Allows reusing the same array.
@@ -90,13 +110,13 @@
 #define rb_darray_clear(ary) (ary->meta.size = 0)

 typedef struct rb_darray_meta {
-    int32_t size;
-    int32_t capa;
+    size_t size;
+    size_t capa;
 } rb_darray_meta_t;

 // Get the size of the dynamic array.
 //
-static inline int32_t
+static inline size_t
 rb_darray_size(const void *ary)
 {
     const rb_darray_meta_t *meta = ary;
@@ -105,7 +125,7 @@ rb_darray_size(const void *ary)

 // Get the capacity of the dynamic array.
 //
-static inline int32_t
+static inline size_t
 rb_darray_capa(const void *ary)
 {
     const rb_darray_meta_t *meta = ary;
@@ -114,49 +134,55 @@ rb_darray_capa(const void *ary)

 // Free the dynamic array.
 //
+// TODO: replace this with rb_darray_free_with_gc when YJIT moves to Rust.
+//
 static inline void
 rb_darray_free(void *ary)
 {
     free(ary);
 }

+static inline void
+rb_darray_free_with_gc(void *ary)
+{
+    rb_darray_meta_t *meta = ary;
+    ruby_sized_xfree(ary, meta->capa);
+}
+
 // Internal function. Calculate buffer size on malloc heap.
 static inline size_t
-rb_darray_buffer_size(int32_t capacity, size_t header_size, size_t element_size)
+rb_darray_buffer_size(size_t capacity, size_t header_size, size_t element_size)
 {
     if (capacity == 0) return 0;
-    return header_size + (size_t)capacity * element_size;
+    return header_size + capacity * element_size;
 }

 // Internal function
-// Ensure there is space for one more element. Return 1 on success and 0 on failure.
+// Ensure there is space for one more element.
 // Note: header_size can be bigger than sizeof(rb_darray_meta_t) when T is __int128_t, for example.
-static inline int
-rb_darray_ensure_space(void *ptr_to_ary, size_t header_size, size_t element_size)
+static inline void
+rb_darray_ensure_space(void *ptr_to_ary, size_t header_size, size_t element_size, void *(*realloc_impl)(void *, size_t))
 {
     rb_darray_meta_t **ptr_to_ptr_to_meta = ptr_to_ary;
     rb_darray_meta_t *meta = *ptr_to_ptr_to_meta;
-    int32_t current_capa = rb_darray_capa(meta);
-    if (rb_darray_size(meta) < current_capa) return 1;
+    size_t current_capa = rb_darray_capa(meta);
+    if (rb_darray_size(meta) < current_capa) return;

-    int32_t new_capa;
-    // Calculate new capacity
-    if (current_capa == 0) {
-        new_capa = 1;
-    }
-    else {
-        int64_t doubled = 2 * (int64_t)current_capa;
-        new_capa = (int32_t)doubled;
-        if (new_capa != doubled) return 0;
-    }
+    // Double the capacity
+    size_t new_capa = current_capa == 0 ? 1 : current_capa * 2;

     // Calculate new buffer size
     size_t current_buffer_size = rb_darray_buffer_size(current_capa, header_size, element_size);
     size_t new_buffer_size = rb_darray_buffer_size(new_capa, header_size, element_size);
-    if (new_buffer_size <= current_buffer_size) return 0;
+    if (new_buffer_size <= current_buffer_size) {
+        rb_bug("rb_darray_ensure_space: overflow");
+    }

-    rb_darray_meta_t *doubled_ary = realloc(meta, new_buffer_size);
-    if (!doubled_ary) return 0;
+    // TODO: replace with rb_xrealloc_mul_add(meta, new_capa, element_size, header_size);
+    rb_darray_meta_t *doubled_ary = realloc_impl(meta, new_buffer_size);
+    if (!doubled_ary) {
+        rb_bug("rb_darray_ensure_space: failed");
+    }

     if (meta == NULL) {
         // First allocation. Initialize size. On subsequence allocations
@@ -169,22 +195,23 @@ rb_darray_ensure_space(void *ptr_to_ary, size_t header_size, size_t element_size)
     // We don't have access to the type of the dynamic array in function context.
     // Write out result with memcpy to avoid strict aliasing issue.
     memcpy(ptr_to_ary, &doubled_ary, sizeof(doubled_ary));
-    return 1;
 }

-static inline int
-rb_darray_make_impl(void *ptr_to_ary, int32_t array_size, size_t header_size, size_t element_size)
+static inline void
+rb_darray_make_impl(void *ptr_to_ary, size_t array_size, size_t header_size, size_t element_size, void *(*calloc_impl)(size_t, size_t))
 {
     rb_darray_meta_t **ptr_to_ptr_to_meta = ptr_to_ary;
-    if (array_size < 0) return 0;
     if (array_size == 0) {
         *ptr_to_ptr_to_meta = NULL;
-        return 1;
+        return;
     }

+    // TODO: replace with rb_xcalloc_mul_add(array_size, element_size, header_size)
     size_t buffer_size = rb_darray_buffer_size(array_size, header_size, element_size);
-    rb_darray_meta_t *meta = calloc(buffer_size, 1);
-    if (!meta) return 0;
+    rb_darray_meta_t *meta = calloc_impl(buffer_size, 1);
+    if (!meta) {
+        rb_bug("rb_darray_make_impl: failed");
+    }

     meta->size = array_size;
     meta->capa = array_size;
@@ -192,7 +219,6 @@ rb_darray_make_impl(void *ptr_to_ary, int32_t array_size, size_t header_size, size_t element_size)
     // We don't have access to the type of the dynamic array in function context.
     // Write out result with memcpy to avoid strict aliasing issue.
     memcpy(ptr_to_ary, &meta, sizeof(meta));
-    return 1;
 }

 #endif /* RUBY_DARRAY_H */
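A note on how the darray.h change is structured (an editorial observation, not part of the commit message): rb_darray_ensure_space and rb_darray_make_impl now receive the allocator as a function pointer, so the existing rb_darray_append / rb_darray_make macros pass the libc realloc / calloc while the new *_with_gc macros pass ruby_xrealloc / ruby_xcalloc, and the growth and initialization logic stays in one place. As a hypothetical sketch (not part of this commit), another macro family only needs to name a different realloc-compatible function:

    #include <stdlib.h>
    #include "darray.h"

    // Illustrative only: tally bytes requested through darray growth.
    static size_t darray_grow_bytes;

    static void *
    counting_realloc(void *ptr, size_t size)
    {
        darray_grow_bytes += size;
        return realloc(ptr, size);
    }

    // Mirrors rb_darray_append / rb_darray_append_with_gc from the diff above.
    #define rb_darray_append_counted(ptr_to_ary, element) do { \
        rb_darray_ensure_space((ptr_to_ary), sizeof(**(ptr_to_ary)), \
                               sizeof((*(ptr_to_ary))->data[0]), counting_realloc); \
        rb_darray_set(*(ptr_to_ary), \
                      (*(ptr_to_ary))->meta.size, \
                      (element)); \
        (*(ptr_to_ary))->meta.size++; \
    } while (0)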
gc.c: 7 changed lines
@@ -12040,6 +12040,13 @@ rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
     return ruby_xmalloc(w);
 }

+void *
+rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
+{
+    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
+    return ruby_xcalloc(w, 1);
+}
+
 void *
 rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
 {
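The new rb_xcalloc_mul_add mirrors the existing rb_xmalloc_mul_add: it computes x * y + z with overflow checking (size_mul_add_or_raise raises rb_eArgError on overflow) and zero-allocates that many bytes with ruby_xcalloc. A sketch of how the TODOs in darray.h intend to use it; the wrapper name here is illustrative, not from the commit:

    // Intended replacement for the manual buffer-size computation in darray.h;
    // the argument order follows the x * y + z convention of the helpers above.
    static void *
    darray_alloc_zeroed(size_t array_size, size_t element_size, size_t header_size)
    {
        // array_size * element_size + header_size, overflow-checked
        return rb_xcalloc_mul_add(array_size, element_size, header_size);
    }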
@@ -99,6 +99,7 @@ RUBY_ATTR_MALLOC void *rb_aligned_malloc(size_t, size_t) RUBY_ATTR_ALLOC_SIZE((2
 size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
 size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
 RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add(size_t, size_t, size_t);
+RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add(size_t, size_t, size_t);
 void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
 RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
 RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
@@ -97,9 +97,7 @@ jit_mov_gc_ptr(jitstate_t *jit, codeblock_t *cb, x86opnd_t reg, VALUE ptr)
     uint32_t ptr_offset = cb->write_pos - sizeof(VALUE);

     if (!SPECIAL_CONST_P(ptr)) {
-        if (!rb_darray_append(&jit->block->gc_object_offsets, ptr_offset)) {
-            rb_bug("allocation failed");
-        }
+        rb_darray_append(&jit->block->gc_object_offsets, ptr_offset);
     }
 }

@@ -196,7 +194,7 @@ static void
 record_global_inval_patch(const codeblock_t *cb, uint32_t outline_block_target_pos)
 {
     struct codepage_patch patch_point = { cb->write_pos, outline_block_target_pos };
-    if (!rb_darray_append(&global_inval_patches, patch_point)) rb_bug("allocation failed");
+    rb_darray_append(&global_inval_patches, patch_point);
 }

 static bool jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_t insn_opnd, VALUE sample_instance, const int max_chain_depth, uint8_t *side_exit);
@@ -558,9 +558,8 @@ add_block_version(block_t *block)
     if ((unsigned)casted != body->iseq_size) {
         rb_bug("iseq too large");
     }
-    if (!rb_darray_make(&body->yjit_blocks, casted)) {
-        rb_bug("allocation failed");
-    }
+
+    rb_darray_make(&body->yjit_blocks, casted);

 #if YJIT_STATS
     // First block compiled for this iseq
@@ -572,9 +571,7 @@ add_block_version(block_t *block)
     rb_yjit_block_array_t *block_array_ref = rb_darray_ref(body->yjit_blocks, blockid.idx);

     // Add the new block
-    if (!rb_darray_append(block_array_ref, block)) {
-        rb_bug("allocation failed");
-    }
+    rb_darray_append(block_array_ref, block);

     {
         // By writing the new block to the iseq, the iseq now
@@ -633,8 +633,8 @@ rb_yjit_constant_ic_update(const rb_iseq_t *const iseq, IC ic)
     rb_yjit_block_array_t getinlinecache_blocks = yjit_get_version_array(iseq, get_insn_idx);

     // Put a bound for loop below to be defensive
-    const int32_t initial_version_count = rb_darray_size(getinlinecache_blocks);
-    for (int32_t iteration=0; iteration<initial_version_count; ++iteration) {
+    const size_t initial_version_count = rb_darray_size(getinlinecache_blocks);
+    for (size_t iteration=0; iteration<initial_version_count; ++iteration) {
         getinlinecache_blocks = yjit_get_version_array(iseq, get_insn_idx);

         if (rb_darray_size(getinlinecache_blocks) > 0) {