avoid overflow in integer multiplication

This changeset replaces `ruby_xmalloc(x * y)` with
`ruby_xmalloc2(x, y)`.  Some convenience functions are also
provided, for instance `rb_xmalloc_mul_add(x, y, z)`, which
allocates x * y + z bytes.
卜部昌平 2019-10-07 16:56:08 +09:00
Parent a220410be7
Commit 7e0ae1698d
12 changed files with 282 additions and 49 deletions
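A quick sketch of the pattern being replaced (`buf` and `n` are illustrative names, not taken from the diff):

    /* Before: the multiplication can wrap around silently, so a huge n
     * produces an undersized buffer and later out-of-bounds writes. */
    VALUE *buf = (VALUE *)ruby_xmalloc(sizeof(VALUE) * n);

    /* After: ruby_xmalloc2() checks sizeof(VALUE) * n for overflow and
     * raises instead of returning an undersized buffer. */
    VALUE *buf = ruby_xmalloc2(sizeof(VALUE), n);

The detection itself is centralized in gc.c below: `__builtin_mul_overflow()` where available, a double-width multiply or `_umul128()` otherwise, and as a last resort the portable division test from CERT INT30-C. A self-contained sketch of that fallback (the helper name `mul_overflows` is hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool
    mul_overflows(size_t x, size_t y, size_t *z)
    {
        /* x * y wraps around iff y != 0 && x > SIZE_MAX / y. */
        if (y != 0 && x > SIZE_MAX / y) return true;
        *z = x * y;
        return false;
    }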

compile.c

@@ -869,6 +869,13 @@ compile_data_alloc(rb_iseq_t *iseq, size_t size)
return compile_data_alloc_with_arena(arena, size);
}
static inline void *
compile_data_alloc2(rb_iseq_t *iseq, size_t x, size_t y)
{
size_t size = rb_size_mul_or_raise(x, y, rb_eRuntimeError);
return compile_data_alloc(iseq, size);
}
static INSN *
compile_data_alloc_insn(rb_iseq_t *iseq)
{
@@ -1127,7 +1134,7 @@ new_insn_body(rb_iseq_t *iseq, int line_no, enum ruby_vminsn_type insn_id, int a
if (argc > 0) {
int i;
va_init_list(argv, argc);
operands = (VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * argc);
operands = compile_data_alloc2(iseq, sizeof(VALUE), argc);
for (i = 0; i < argc; i++) {
VALUE v = va_arg(argv, VALUE);
operands[i] = v;
@@ -1168,7 +1175,7 @@ new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_cal
static INSN *
new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, struct rb_call_info_kw_arg *keywords)
{
VALUE *operands = (VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * 3);
VALUE *operands = compile_data_alloc2(iseq, sizeof(VALUE), 3);
operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
operands[1] = Qfalse; /* cache */
operands[2] = (VALUE)blockiseq;
@@ -2080,8 +2087,10 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
insns_info = ALLOC_N(struct iseq_insn_info_entry, insn_num);
positions = ALLOC_N(unsigned int, insn_num);
body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, body->is_size);
body->ci_entries = (struct rb_call_info *)ruby_xmalloc(sizeof(struct rb_call_info) * body->ci_size +
sizeof(struct rb_call_info_with_kwarg) * body->ci_kw_size);
body->ci_entries =
rb_xmalloc_mul_add_mul(
sizeof(struct rb_call_info), body->ci_size,
sizeof(struct rb_call_info_with_kwarg), body->ci_kw_size);
MEMZERO(body->ci_entries + body->ci_size, struct rb_call_info_with_kwarg, body->ci_kw_size); /* need to clear ci_kw entries */
body->cc_entries = ZALLOC_N(struct rb_call_cache, body->ci_size + body->ci_kw_size);
@@ -3197,7 +3206,7 @@ insn_set_specialized_instruction(rb_iseq_t *iseq, INSN *iobj, int insn_id)
if (insn_id == BIN(opt_neq)) {
VALUE *old_operands = iobj->operands;
iobj->operand_size = 4;
iobj->operands = (VALUE *)compile_data_alloc(iseq, iobj->operand_size * sizeof(VALUE));
iobj->operands = compile_data_alloc2(iseq, iobj->operand_size, sizeof(VALUE));
iobj->operands[0] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
iobj->operands[1] = Qfalse; /* CALL_CACHE */
iobj->operands[2] = old_operands[0];
@@ -3367,7 +3376,7 @@ new_unified_insn(rb_iseq_t *iseq,
if (argc > 0) {
ptr = operands =
(VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * argc);
compile_data_alloc2(iseq, sizeof(VALUE), argc);
}
/* copy operands */
@@ -3823,7 +3832,8 @@ compile_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
node = root_node->nd_head;
{
int len = (int)node->nd_alen / 2;
struct rb_call_info_kw_arg *kw_arg = (struct rb_call_info_kw_arg *)ruby_xmalloc(sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (len - 1));
struct rb_call_info_kw_arg *kw_arg =
rb_xmalloc_mul_add(len - 1, sizeof(VALUE), sizeof(struct rb_call_info_kw_arg));
VALUE *keywords = kw_arg->keywords;
int i = 0;
kw_arg->keyword_len = len;
@@ -8776,7 +8786,7 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
}
if (argc > 0) {
argv = compile_data_alloc(iseq, sizeof(VALUE) * argc);
argv = compile_data_alloc2(iseq, sizeof(VALUE), argc);
for (j=0; j<argc; j++) {
VALUE op = rb_ary_entry(obj, j+1);
switch (insn_op_type((VALUE)insn_id, j)) {
@@ -9381,9 +9391,10 @@ ibf_dump_overwrite(struct ibf_dump *dump, void *buff, unsigned int size, long of
}
static void *
ibf_load_alloc(const struct ibf_load *load, ibf_offset_t offset, int size)
ibf_load_alloc(const struct ibf_load *load, ibf_offset_t offset, size_t x, size_t y)
{
void *buff = ruby_xmalloc(size);
void *buff = ruby_xmalloc2(x, y);
size_t size = x * y;
memcpy(buff, load->current_buffer->buff + offset, size);
return buff;
}
@@ -9393,7 +9404,7 @@ ibf_load_alloc(const struct ibf_load *load, ibf_offset_t offset, int size)
#define IBF_W(b, type, n) (IBF_W_ALIGN(type), (type *)(VALUE)IBF_WP(b, type, n))
#define IBF_WV(variable) ibf_dump_write(dump, &(variable), sizeof(variable))
#define IBF_WP(b, type, n) ibf_dump_write(dump, (b), sizeof(type) * (n))
#define IBF_R(val, type, n) (type *)ibf_load_alloc(load, IBF_OFFSET(val), sizeof(type) * (n))
#define IBF_R(val, type, n) (type *)ibf_load_alloc(load, IBF_OFFSET(val), sizeof(type), (n))
#define IBF_ZERO(variable) memset(&(variable), 0, sizeof(variable))
static int
@@ -9667,7 +9678,7 @@ ibf_load_code(const struct ibf_load *load, const rb_iseq_t *iseq, ibf_offset_t b
{
unsigned int code_index;
ibf_offset_t reading_pos = bytecode_offset;
VALUE *code = ruby_xmalloc(sizeof(VALUE) * iseq_size);
VALUE *code = ALLOC_N(VALUE, iseq_size);
struct rb_iseq_constant_body *load_body = iseq->body;
struct rb_call_info *ci_entries = load_body->ci_entries;
@@ -9884,7 +9895,7 @@ static unsigned int *
ibf_load_insns_info_positions(const struct ibf_load *load, ibf_offset_t positions_offset, unsigned int size)
{
ibf_offset_t reading_pos = positions_offset;
unsigned int *positions = ruby_xmalloc(sizeof(unsigned int) * size);
unsigned int *positions = ALLOC_N(unsigned int, size);
unsigned int last = 0;
unsigned int i;
@@ -10039,8 +10050,10 @@ ibf_load_ci_entries(const struct ibf_load *load,
unsigned int i;
struct rb_call_info *ci_entries = ruby_xmalloc(sizeof(struct rb_call_info) * ci_size +
sizeof(struct rb_call_info_with_kwarg) * ci_kw_size);
struct rb_call_info *ci_entries =
rb_xmalloc_mul_add_mul(
sizeof(struct rb_call_info), ci_size,
sizeof(struct rb_call_info_with_kwarg), ci_kw_size);
struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&ci_entries[ci_size];
for (i = 0; i < ci_size; i++) {
@@ -10060,7 +10073,8 @@ ibf_load_ci_entries(const struct ibf_load *load,
int keyword_len = (int)ibf_load_small_value(load, &reading_pos);
ci_kw_entries[i].kw_arg = ruby_xmalloc(sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1));
ci_kw_entries[i].kw_arg =
rb_xmalloc_mul_add(keyword_len - 1, sizeof(VALUE), sizeof(struct rb_call_info_kw_arg));
ci_kw_entries[i].kw_arg->keyword_len = keyword_len;

dir.c

@@ -1343,8 +1343,20 @@ sys_enc_warning_in(const char *func, const char *mesg, rb_encoding *enc)
#define sys_warning(val, enc) \
((flags & GLOB_VERBOSE) ? sys_enc_warning_in(RUBY_FUNCTION_NAME_STRING, (val), (enc)) :(void)0)
static inline void *
glob_alloc_n(size_t x, size_t y)
{
size_t z;
if (rb_mul_size_overflow(x, y, SSIZE_MAX, &z)) {
rb_memerror(); /* or...? */
}
else {
return malloc(z);
}
}
#define GLOB_ALLOC(type) ((type *)malloc(sizeof(type)))
#define GLOB_ALLOC_N(type, n) ((type *)malloc(sizeof(type) * (n)))
#define GLOB_ALLOC_N(type, n) ((type *)glob_alloc_n(sizeof(type), n))
#define GLOB_REALLOC(ptr, size) realloc((ptr), (size))
#define GLOB_FREE(ptr) free(ptr)
#define GLOB_JUMP_TAG(status) (((status) == -1) ? rb_memerror() : rb_jump_tag(status))

encoding.c

@@ -266,7 +266,7 @@ enc_table_expand(int newsize)
if (enc_table.size >= newsize) return newsize;
newsize = (newsize + 7) / 8 * 8;
ent = xrealloc(enc_table.list, sizeof(*enc_table.list) * newsize);
ent = REALLOC_N(enc_table.list, struct rb_encoding_entry, newsize);
memset(ent + enc_table.size, 0, sizeof(*ent)*(newsize - enc_table.size));
enc_table.list = ent;
enc_table.size = newsize;

gc.c

@@ -80,6 +80,157 @@
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#if defined(_MSC_VER) && defined(_WIN64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
/* Expecting this struct to be eliminated by function inlining */
struct optional {
bool left;
size_t right;
};
static inline struct optional
size_mul_overflow(size_t x, size_t y)
{
bool p;
size_t z;
#if 0
#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
p = __builtin_mul_overflow(x, y, &z);
#elif defined(DSIZE_T)
RB_GNUC_EXTENSION DSIZE_T dx = x;
RB_GNUC_EXTENSION DSIZE_T dy = y;
RB_GNUC_EXTENSION DSIZE_T dz = dx * dy;
p = dz > SIZE_MAX;
z = (size_t)dz;
#elif defined(_MSC_VER) && defined(_WIN64)
unsigned __int64 dp;
unsigned __int64 dz = _umul128(x, y, &dp);
p = (bool)dp;
z = (size_t)dz;
#else
/* https://wiki.sei.cmu.edu/confluence/display/c/INT30-C.+Ensure+that+unsigned+integer+operations+do+not+wrap */
p = (y != 0) && (x > SIZE_MAX / y);
z = x * y;
#endif
return (struct optional) { p, z, };
}
static inline struct optional
size_add_overflow(size_t x, size_t y)
{
size_t z;
bool p;
#if 0
#elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
RB_GNUC_EXTENSION DSIZE_T dx = x;
RB_GNUC_EXTENSION DSIZE_T dy = y;
RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
p = dz > SIZE_MAX;
z = (size_t)dz;
#else
z = x + y;
p = z < y;
#endif
return (struct optional) { p, z, };
}
static inline struct optional
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
struct optional t = size_mul_overflow(x, y);
struct optional u = size_add_overflow(t.right, z);
return (struct optional) { t.left || u.left, u.right };
}
static inline struct optional
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
struct optional t = size_mul_overflow(x, y);
struct optional u = size_mul_overflow(z, w);
struct optional v = size_add_overflow(t.right, u.right);
return (struct optional) { t.left || u.left || v.left, v.right };
}
static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
struct optional t = size_mul_overflow(x, y);
if (LIKELY(!t.left)) {
return t.right;
}
else {
rb_raise(
exc,
"integer overflow: %"PRIuSIZE
" * %"PRIuSIZE
" > %"PRIuSIZE,
x, y, SIZE_MAX);
}
}
size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
return size_mul_or_raise(x, y, exc);
}
static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
struct optional t = size_mul_add_overflow(x, y, z);
if (LIKELY(!t.left)) {
return t.right;
}
else {
rb_raise(
exc,
"integer overflow: %"PRIuSIZE
" * %"PRIuSIZE
" + %"PRIuSIZE
" > %"PRIuSIZE,
x, y, z, SIZE_MAX);
}
}
size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
return size_mul_add_or_raise(x, y, z, exc);
}
static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
struct optional t = size_mul_add_mul_overflow(x, y, z, w);
if (LIKELY(!t.left)) {
return t.right;
}
else {
rb_raise(
exc,
"integer overflow: %"PRIdSIZE
" * %"PRIdSIZE
" + %"PRIdSIZE
" * %"PRIdSIZE
" > %"PRIdSIZE,
x, y, z, w, SIZE_MAX);
}
}
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
@@ -1410,10 +1561,16 @@ RVALUE_WHITE_P(VALUE obj)
--------------------------- ObjectSpace -----------------------------
*/
static inline void *
calloc1(size_t n)
{
return calloc(1, n);
}
rb_objspace_t *
rb_objspace_alloc(void)
{
rb_objspace_t *objspace = calloc(1, sizeof(rb_objspace_t));
rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
malloc_limit = gc_params.malloc_limit_min;
list_head_init(&objspace->eden_heap.pages);
list_head_init(&objspace->tomb_heap.pages);
@@ -1466,7 +1623,7 @@ static void
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
{
struct heap_page **sorted;
size_t size = next_length * sizeof(struct heap_page *);
size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
@@ -1626,7 +1783,7 @@ heap_page_allocate(rb_objspace_t *objspace)
}
/* assign heap_page entry */
page = (struct heap_page *)calloc(1, sizeof(struct heap_page));
page = calloc1(sizeof(struct heap_page));
if (page == 0) {
rb_aligned_free(page_body);
rb_memerror();
@@ -7595,7 +7752,8 @@ static struct heap_page **
allocate_page_list(rb_objspace_t *objspace, page_compare_func_t *comparator)
{
size_t total_pages = heap_eden->total_pages;
struct heap_page *page = 0, **page_list = malloc(total_pages * sizeof(struct heap_page *));
size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
struct heap_page *page = 0, **page_list = malloc(size);
int i = 0;
list_for_each(&heap_eden->pages, page, page_node) {
@@ -9753,11 +9911,7 @@ objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
size_t ret;
if (rb_mul_size_overflow(count, elsize, SSIZE_MAX, &ret)) {
ruby_malloc_size_overflow(count, elsize);
}
return ret;
return size_mul_or_raise(count, elsize, rb_eArgError);
}
static void *
@@ -9897,7 +10051,7 @@ objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
/* hit */
}
else {
data = malloc(sizeof(size_t) * 2);
data = malloc(xmalloc2_size(2, sizeof(size_t)));
if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
data[0] = data[1] = 0;
st_insert(malloc_info_file_table, key, (st_data_t)data);
@@ -9961,7 +10115,7 @@ objspace_xcalloc(rb_objspace_t *objspace, size_t size)
void *mem;
size = objspace_malloc_prepare(objspace, size);
TRY_WITH_GC(mem = calloc(1, size));
TRY_WITH_GC(mem = calloc1(size));
return objspace_malloc_fixup(objspace, mem, size);
}
@@ -9996,10 +10150,7 @@ ruby_xrealloc_body(void *ptr, size_t new_size)
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
size_t len = size * n;
if (n != 0 && size != len / n) {
rb_raise(rb_eArgError, "realloc: possible integer overflow");
}
size_t len = xmalloc2_size(n, size);
return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}
@@ -10026,6 +10177,34 @@ ruby_xfree(void *x)
ruby_sized_xfree(x, 0);
}
void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
return ruby_xmalloc(w);
}
void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
return ruby_xrealloc((void *)p, w);
}
void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
return ruby_xmalloc(u);
}
void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
return ruby_xcalloc(u, 1);
}
/* Mimic ruby_xmalloc, but need not rb_objspace.
* should return pointer suitable for ruby_xfree
*/
@@ -10276,7 +10455,7 @@ wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
return ST_DELETE;
}
if (j < i) {
ptr = ruby_sized_xrealloc2(ptr, j + 1, sizeof(VALUE), i);
SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
ptr[0] = j;
*value = (st_data_t)ptr;
}
@@ -10491,7 +10670,7 @@ wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
if (existing) {
size = (ptr = optr = (VALUE *)*val)[0];
++size;
ptr = ruby_sized_xrealloc2(ptr, size + 1, sizeof(VALUE), size);
SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
}
else {
optr = 0;
@@ -10636,12 +10815,12 @@ gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
if (!objspace->profile.records) {
objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
objspace->profile.records = malloc(sizeof(gc_profile_record) * objspace->profile.size);
objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
}
if (index >= objspace->profile.size) {
void *ptr;
objspace->profile.size += 1000;
ptr = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
if (!ptr) rb_memerror();
objspace->profile.records = ptr;
}

internal.h

@@ -1627,7 +1627,7 @@ void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR
void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_ALLOC_SIZE((2, 3));
void ruby_sized_xfree(void *x, size_t size);
RUBY_SYMBOL_EXPORT_END
#define SIZED_REALLOC_N(var,type,n,old_n) ((var)=(type*)ruby_sized_xrealloc((char*)(var), (n) * sizeof(type), (old_n) * sizeof(type)))
#define SIZED_REALLOC_N(var,type,n,old_n) ((var)=(type*)ruby_sized_xrealloc2((void*)(var), (n), sizeof(type), (old_n)))
#endif
/* optimized version of NEWOBJ() */
@@ -1647,6 +1647,13 @@ __attribute__((__alloc_align__(1)))
void *rb_aligned_malloc(size_t, size_t) RUBY_ATTR_MALLOC RUBY_ATTR_ALLOC_SIZE((2));
void rb_aligned_free(void *);
size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
void *rb_xmalloc_mul_add(size_t, size_t, size_t) RUBY_ATTR_MALLOC;
void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t) RUBY_ATTR_MALLOC;
void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t) RUBY_ATTR_MALLOC;
/* hash.c */
#if RHASH_CONVERT_TABLE_DEBUG
struct st_table *rb_hash_tbl_raw(VALUE hash, const char *file, int line);

iseq.c

@@ -3391,7 +3391,10 @@ succ_index_table_create(int max_pos, int *data, int size)
{
const int imm_size = (max_pos < IMMEDIATE_TABLE_SIZE ? max_pos + 8 : IMMEDIATE_TABLE_SIZE) / 9;
const int succ_size = (max_pos < IMMEDIATE_TABLE_SIZE ? 0 : (max_pos - IMMEDIATE_TABLE_SIZE + 511)) / 512;
struct succ_index_table *sd = ruby_xcalloc(imm_size * sizeof(uint64_t) + succ_size * sizeof(struct succ_dict_block), 1); /* zero cleared */
struct succ_index_table *sd =
rb_xcalloc_mul_add_mul(
imm_size, sizeof(uint64_t),
succ_size, sizeof(struct succ_dict_block));
int i, j, k, r;
r = 0;
@@ -3426,7 +3429,7 @@ succ_index_table_invert(int max_pos, struct succ_index_table *sd, int size)
{
const int imm_size = (max_pos < IMMEDIATE_TABLE_SIZE ? max_pos + 8 : IMMEDIATE_TABLE_SIZE) / 9;
const int succ_size = (max_pos < IMMEDIATE_TABLE_SIZE ? 0 : (max_pos - IMMEDIATE_TABLE_SIZE + 511)) / 512;
unsigned int *positions = ruby_xmalloc(sizeof(unsigned int) * size), *p;
unsigned int *positions = ALLOC_N(unsigned int, size), *p;
int i, j, k, r = -1;
p = positions;
for (j = 0; j < imm_size; j++) {

iseq.h

@@ -26,7 +26,9 @@ extern const ID rb_iseq_shared_exc_local_tbl[];
static inline size_t
rb_call_info_kw_arg_bytes(int keyword_len)
{
return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
return rb_size_mul_add_or_raise(
keyword_len - 1, sizeof(VALUE), sizeof(struct rb_call_info_kw_arg),
rb_eRuntimeError);
}
#define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
@@ -67,7 +69,7 @@ static inline VALUE *
ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
{
return iseq->body->variable.original_iseq =
ruby_xmalloc2(size, sizeof(VALUE));
ALLOC_N(VALUE, size);
}
#define ISEQ_TRACE_EVENTS (RUBY_EVENT_LINE | \

node.c

@@ -1145,8 +1145,13 @@ init_node_buffer_list(node_buffer_list_t * nb, node_buffer_elem_t *head)
static node_buffer_t *
rb_node_buffer_new(void)
{
size_t bucket_size = offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_LEN * sizeof(NODE);
node_buffer_t *nb = xmalloc(sizeof(node_buffer_t) + (bucket_size * 2));
const size_t bucket_size = offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_LEN * sizeof(NODE);
const size_t alloc_size = sizeof(node_buffer_t) + (bucket_size * 2);
STATIC_ASSERT(
integer_overflow,
offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_LEN * sizeof(NODE)
> sizeof(node_buffer_t) + 2 * sizeof(node_buffer_elem_t));
node_buffer_t *nb = ruby_xmalloc(alloc_size);
init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1]);
init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size));
nb->mark_ary = Qnil;
@@ -1179,7 +1184,7 @@ ast_newnode_in_bucket(node_buffer_list_t *nb)
if (nb->idx >= nb->len) {
long n = nb->len * 2;
node_buffer_elem_t *nbe;
nbe = xmalloc(offsetof(node_buffer_elem_t, buf) + n * sizeof(NODE));
nbe = rb_xmalloc_mul_add(n, sizeof(NODE), offsetof(node_buffer_elem_t, buf));
nbe->len = n;
nb->idx = 0;
nb->len = n;

string.c

@@ -2175,7 +2175,7 @@ rb_str_modify_expand(VALUE str, long expand)
if (expand < 0) {
rb_raise(rb_eArgError, "negative expanding string size");
}
if (expand > LONG_MAX - len) {
if (expand >= LONG_MAX - len) {
rb_raise(rb_eArgError, "string size too big");
}

thread.c

@@ -3867,7 +3867,9 @@ rb_fd_set(int fd, rb_fdset_t *set)
}
if (set->fdset->fd_count >= (unsigned)set->capa) {
set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
set->fdset =
rb_xrealloc_mul_add(
set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
}
set->fdset->fd_array[set->fdset->fd_count++] = s;
}

transient_heap.c

@@ -440,6 +440,9 @@ Init_TransientHeap(void)
theap->promoted_objects_index = 0;
/* should not use ALLOC_N to be free from GC */
theap->promoted_objects = malloc(sizeof(VALUE) * theap->promoted_objects_size);
STATIC_ASSERT(
integer_overflow,
sizeof(VALUE) <= SIZE_MAX / TRANSIENT_HEAP_PROMOTED_DEFAULT_SIZE);
if (theap->promoted_objects == NULL) rb_bug("Init_TransientHeap: malloc failed.");
}
@@ -618,7 +621,13 @@ transient_heap_promote_add(struct transient_heap* theap, VALUE obj)
if (theap->promoted_objects_size <= theap->promoted_objects_index) {
theap->promoted_objects_size *= 2;
if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "rb_transient_heap_promote: expand table to %d\n", theap->promoted_objects_size);
theap->promoted_objects = realloc(theap->promoted_objects, theap->promoted_objects_size * sizeof(VALUE));
if (UNLIKELY((size_t)theap->promoted_objects_size > SIZE_MAX / sizeof(VALUE))) {
/* realloc failure due to integer overflow */
theap->promoted_objects = NULL;
}
else {
theap->promoted_objects = realloc(theap->promoted_objects, theap->promoted_objects_size * sizeof(VALUE));
}
if (theap->promoted_objects == NULL) rb_bug("rb_transient_heap_promote: realloc failed");
}
theap->promoted_objects[theap->promoted_objects_index++] = obj;

vm_backtrace.c

@@ -526,7 +526,7 @@ bt_init(void *ptr, size_t size)
struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
arg->btobj = backtrace_alloc(rb_cBacktrace);
GetCoreDataFromValue(arg->btobj, rb_backtrace_t, arg->bt);
arg->bt->backtrace_base = arg->bt->backtrace = ruby_xmalloc(sizeof(rb_backtrace_location_t) * size);
arg->bt->backtrace_base = arg->bt->backtrace = ALLOC_N(rb_backtrace_location_t, size);
arg->bt->backtrace_size = 0;
}