* gc.c (rb_alloc_tmp_buffer_with_count): added like xmalloc2 to

avoid duplicated check of size.

* gc.c (ruby_xmalloc2): added to keep separate layers.

* include/ruby/ruby.h (rb_alloc_tmp_buffer2): added to check
  the size more statically.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@54664 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
naruse 2016-04-21 20:59:40 +00:00
Parent: f056071cdc
Commit: feaa82a42b
3 changed files with 52 additions and 11 deletions

View file

@ -1,3 +1,13 @@
Fri Apr 22 04:57:01 2016 NARUSE, Yui <naruse@ruby-lang.org>
* gc.c (rb_alloc_tmp_buffer_with_count): added like xmalloc2 to
avoid duplicated check of size.
* gc.c (ruby_xmalloc2): added to keep separate layers.
* include/ruby/ruby.h (rb_alloc_tmp_buffer2): added to check
the size more statically.
Fri Apr 22 04:54:40 2016 NARUSE, Yui <naruse@ruby-lang.org>
* include/ruby/ruby.h (LIKELY): moved from internal.h.

29
gc.c
View file

@ -7850,6 +7850,12 @@ objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE); objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
} }
/* Allocate SIZE bytes from the default objspace.
 * Thin static wrapper binding objspace_xmalloc0() to the global
 * rb_objspace, so internal callers need not name the objspace.
 * NOTE(review): presumably raises (does not return NULL) on OOM like
 * the other xmalloc family members — confirm in objspace_xmalloc0. */
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}
void * void *
ruby_xmalloc(size_t size) ruby_xmalloc(size_t size)
{ {
@ -7972,22 +7978,29 @@ ruby_mimfree(void *ptr)
} }
void * void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len) rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{ {
NODE *s; NODE *s;
long cnt;
void *ptr; void *ptr;
s = rb_node_newnode(NODE_ALLOCA, 0, 0, 0);
ptr = ruby_xmalloc0(size);
s->u1.value = (VALUE)ptr;
s->u3.cnt = cnt;
*store = (VALUE)s;
return ptr;
}
/*
 * Public byte-length entry point: allocate a LEN-byte GC-rooted
 * temporary buffer in *STORE.  Validates LEN, converts it to a count of
 * VALUE-sized slots with roomof(), and delegates to
 * rb_alloc_tmp_buffer_with_count().  Raises ArgumentError when LEN is
 * negative or the rounded-up slot count overflows a long.
 * (Reconstructed from a mangled side-by-side diff; this is the
 * post-change function.)
 */
void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
	rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }
    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}
void void

View file

@ -1615,6 +1615,7 @@ rb_num2char_inline(VALUE x)
#define ALLOCA_N(type,n) ((type*)alloca(sizeof(type)*(n)))
/* GC-rooted temporary buffers (implemented in gc.c); release with
 * rb_free_tmp_buffer().  De-duplicated from fused diff columns. */
void *rb_alloc_tmp_buffer(volatile VALUE *store, long len) RUBY_ATTR_ALLOC_SIZE((2));
void *rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t len, size_t count) RUBY_ATTR_ALLOC_SIZE((2,3));
void rb_free_tmp_buffer(volatile VALUE *store);
NORETURN(void ruby_malloc_size_overflow(size_t, size_t));
static inline size_t static inline size_t
@ -1625,21 +1626,38 @@ ruby_xmalloc2_size(const size_t count, const size_t elsize)
} }
return count * elsize; return count * elsize;
} }
/*
 * Statically-checked element-count variant of the temporary-buffer
 * allocator: computes how many VALUE-sized slots hold COUNT elements of
 * ELSIZE bytes, raising via ruby_malloc_size_overflow() when the total
 * would exceed LONG_MAX, then delegates to
 * rb_alloc_tmp_buffer_with_count().
 */
static inline void *
rb_alloc_tmp_buffer2(volatile VALUE *store, long count, size_t elsize)
{
    size_t cnt = (size_t)count;
    if (elsize % sizeof(VALUE) == 0) {
	/* elsize is a whole number of VALUEs: guard count*elsize, then
	 * scale cnt to the slot count.  (BUG FIX: the committed code left
	 * cnt == count here and checked LONG_MAX / sizeof(VALUE), so any
	 * elsize > sizeof(VALUE) under-allocated the buffer.) */
	if (UNLIKELY(cnt > LONG_MAX / elsize)) {
	    ruby_malloc_size_overflow(cnt, elsize);
	}
	cnt *= elsize / sizeof(VALUE);
    }
    else {
	/* round count*elsize up to a whole number of VALUE slots; the
	 * guard keeps the rounded byte size within LONG_MAX. */
	if (UNLIKELY(cnt > (LONG_MAX - sizeof(VALUE)) / elsize)) {
	    ruby_malloc_size_overflow(cnt, elsize);
	}
	cnt = (cnt * elsize + sizeof(VALUE) - 1) / sizeof(VALUE);
    }
    return rb_alloc_tmp_buffer_with_count(store, cnt * sizeof(VALUE), cnt);
}
/* allocates _n_ bytes temporary buffer and stores VALUE including it
 * in _v_.  _n_ may be evaluated twice. */
#ifdef C_ALLOCA
# define RB_ALLOCV(v, n) rb_alloc_tmp_buffer(&(v), (n))
# define RB_ALLOCV_N(type, v, n) \
    ((type*)rb_alloc_tmp_buffer2(&(v), (n), sizeof(type)))
#else
# define RUBY_ALLOCV_LIMIT 1024
/* small requests live on the C stack via alloca(); larger ones fall
 * back to a GC-rooted heap buffer (rb_alloc_tmp_buffer*).  The _N form
 * compares (n) against LIMIT/sizeof(type) so the multiplication for
 * alloca() cannot overflow.  De-duplicated from fused diff columns. */
# define RB_ALLOCV(v, n) ((n) < RUBY_ALLOCV_LIMIT ? \
		       (RB_GC_GUARD(v) = 0, alloca(n)) : \
		       rb_alloc_tmp_buffer(&(v), (n)))
# define RB_ALLOCV_N(type, v, n) \
    ((type*)(((size_t)(n) < RUBY_ALLOCV_LIMIT / sizeof(type)) ? \
	     (RB_GC_GUARD(v) = 0, alloca((n) * sizeof(type))) : \
	     rb_alloc_tmp_buffer2(&(v), (n), sizeof(type))))
#endif
#define RB_ALLOCV_END(v) rb_free_tmp_buffer(&(v))