These APIs are much like <valgrind/memcheck.h>. Use them to
annotate our memory usage in a fine-grained manner.


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65573 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Author: shyouhei 2018-11-06 10:06:07 +00:00
Parent: dbd90b2dff
Commit: 4a80c0540f
5 changed files, 104 additions and 5 deletions
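
As a rough illustration of the discipline this commit introduces, here is a sketch only (take_slot() is a made-up name; the other identifiers come from the diff below): free objects sit on the freelist poisoned, and are unpoisoned at the moment they are handed out, loosely mirroring heap_get_freeobj() in gc.c.

    /* Sketch: hand out a free slot, unpoisoning it just before use.
     * With the sanitizers disabled, unpoison_object() compiles to nothing. */
    static VALUE
    take_slot(rb_heap_t *heap)
    {
        RVALUE *p = heap->freelist;
        if (p != NULL) {
            unpoison_object((VALUE)p, true);  /* true: treat as freshly allocated for MSAN */
            heap->freelist = p->as.free.next; /* now safe to read the slot */
        }
        return (VALUE)p;
    }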

configure.ac

@ -1023,6 +1023,8 @@ AC_CHECK_HEADERS(malloc_np.h)
AC_CHECK_HEADERS(net/socket.h)
AC_CHECK_HEADERS(process.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(sanitizer/asan_interface.h)
AC_CHECK_HEADERS(sanitizer/msan_interface.h)
AC_CHECK_HEADERS(setjmpex.h)
AC_CHECK_HEADERS(stdalign.h)
AC_CHECK_HEADERS(sys/attr.h)

gc.c

@ -1434,6 +1434,7 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
if (RGENGC_CHECK_MODE && !is_pointer_to_heap(objspace, p)) {
rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
}
poison_object(obj);
gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
}
@ -1758,6 +1759,7 @@ heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
p = page->freelist;
page->freelist = NULL;
page->free_slots = 0;
unpoison_object((VALUE)p, true);
return p;
}
@ -1768,6 +1770,7 @@ heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
if (LIKELY(p != NULL)) {
heap->freelist = p->as.free.next;
}
unpoison_object((VALUE)p, true);
return (VALUE)p;
}
@ -1778,6 +1781,7 @@ heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
while (1) {
if (LIKELY(p != NULL)) {
unpoison_object((VALUE)p, true);
heap->freelist = p->as.free.next;
return (VALUE)p;
}
@ -2612,8 +2616,11 @@ static int
internal_object_p(VALUE obj)
{
RVALUE *p = (RVALUE *)obj;
+void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
+bool used_p = p->as.basic.flags;
+unpoison_object(obj, false);
-if (p->as.basic.flags) {
+if (used_p) {
switch (BUILTIN_TYPE(p)) {
case T_NODE:
UNEXPECTED_NODE(internal_object_p);
@ -2634,6 +2641,9 @@ internal_object_p(VALUE obj)
return 0;
}
}
if (ptr || ! used_p) {
poison_object(obj);
}
return 1;
}
@ -2924,8 +2934,11 @@ static void
finalize_list(rb_objspace_t *objspace, VALUE zombie)
{
while (zombie) {
-VALUE next_zombie = RZOMBIE(zombie)->next;
-struct heap_page *page = GET_HEAP_PAGE(zombie);
+VALUE next_zombie;
+struct heap_page *page;
+unpoison_object(zombie, false);
+next_zombie = RZOMBIE(zombie)->next;
+page = GET_HEAP_PAGE(zombie);
run_final(objspace, zombie);
@ -3044,6 +3057,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
for (i = 0; i < heap_allocated_pages; i++) {
p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
while (p < pend) {
unpoison_object((VALUE)p, false);
switch (BUILTIN_TYPE(p)) {
case T_DATA:
if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
@ -3067,6 +3081,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
break;
}
poison_object((VALUE)p);
p++;
}
}
@ -3610,6 +3625,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
if (bitset) {
p = offset + i * BITS_BITLENGTH;
do {
unpoison_object((VALUE)p, false);
if (bitset & 1) {
switch (BUILTIN_TYPE(p)) {
default: { /* majority case */
@ -3628,6 +3644,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
freed_slots++;
poison_object((VALUE)p);
}
break;
}
@ -4419,10 +4436,17 @@ gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
{
(void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
if (is_pointer_to_heap(objspace, (void *)obj)) {
-int type = BUILTIN_TYPE(obj);
+int type;
+void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
+unpoison_object(obj, false);
+type = BUILTIN_TYPE(obj);
if (type != T_ZOMBIE && type != T_NONE) {
gc_mark_ptr(objspace, obj);
}
if (ptr) {
poison_object(obj);
}
}
}
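
The pattern added to internal_object_p() and gc_mark_maybe() above generalizes to a small idiom; a hedged sketch follows (peek_type() is an illustrative name, not part of the commit): check whether the slot is currently poisoned, unpoison it long enough to inspect it, then restore the previous state.

    static int
    peek_type(VALUE obj)
    {
        /* remember whether the region was poisoned before we touch it */
        void *poisoned = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
        int type;

        unpoison_object(obj, false);   /* false: existing contents are still meaningful */
        type = BUILTIN_TYPE(obj);      /* safe to inspect now */

        if (poisoned) {
            poison_object(obj);        /* it was hidden before; hide it again */
        }
        return type;
    }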

internal.h

@ -69,7 +69,7 @@ extern "C" {
#endif
#ifndef NO_SANITIZE
-#define NO_SANITIZE(x, y) y
+# define NO_SANITIZE(x, y) y
#endif
#ifdef HAVE_VALGRIND_MEMCHECK_H
@ -95,6 +95,60 @@ extern "C" {
# define __has_extension __has_feature
#endif
#ifdef HAVE_SANITIZER_ASAN_INTERFACE_H
# include <sanitizer/asan_interface.h>
#endif
#if !__has_feature(address_sanitizer)
# define __asan_poison_memory_region(x, y)
# define __asan_unpoison_memory_region(x, y)
# define __asan_region_is_poisoned(x, y) 0
#endif
#ifdef HAVE_SANITIZER_MSAN_INTERFACE_H
# include <sanitizer/msan_interface.h>
#endif
#if !__has_feature(memory_sanitizer)
# define __msan_allocated_memory(x, y)
# define __msan_poison(x, y)
# define __msan_unpoison(x, y)
# define __msan_unpoison_string(x)
#endif
static inline void
poison_memory_region(const volatile void *ptr, size_t size)
{
__msan_poison(ptr, size);
__asan_poison_memory_region(ptr, size);
}
static inline void
poison_object(VALUE obj)
{
struct RVALUE *ptr = (void *)obj;
poison_memory_region(ptr, SIZEOF_VALUE);
}
static inline void
unpoison_memory_region(const volatile void *ptr, size_t size, bool malloc_p)
{
__asan_unpoison_memory_region(ptr, size);
if (malloc_p) {
__msan_allocated_memory(ptr, size);
}
else {
__msan_unpoison(ptr, size);
}
}
static inline void
unpoison_object(VALUE obj, bool newobj_p)
{
struct RVALUE *ptr = (void *)obj;
unpoison_memory_region(ptr, SIZEOF_VALUE, newobj_p);
}
/* Prevent compiler from reordering access */
#define ACCESS_ONCE(type,x) (*((volatile type *)&(x)))
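
The malloc_p flag above selects between two MSAN states, and the distinction is easy to get backwards; here is a hedged sketch (struct slot and both helper names are made up, not part of the commit). With the sanitizers off, all of these calls are no-ops through the fallback macros above.

    struct slot { int filled; char data[64]; };   /* illustrative only */

    static void
    recycle_slot(struct slot *s)
    {
        /* memory handed out again after a free: its old contents are
         * garbage, so MSAN should treat it as allocated-but-uninitialized */
        unpoison_memory_region(s, sizeof *s, true);
        s->filled = 0;
    }

    static void
    expose_slot(struct slot *s)
    {
        /* memory that is still logically valid and merely hidden from
         * accidental access: mark it readable again as-is */
        unpoison_memory_region(s, sizeof *s, false);
    }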

string.c

@ -803,6 +803,11 @@ VALUE
rb_str_new_cstr(const char *ptr)
{
must_not_null(ptr);
/* rb_str_new_cstr() can take a pointer from non-malloc-generated
* memory regions, and MSAN cannot detect that. Just trust the
* programmer that the argument passed here is a sane C
* string. */
__msan_unpoison_string(ptr);
return rb_str_new(ptr, strlen(ptr));
}
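
For illustration, a sketch of the situation the comment above describes (read_outside_msan() stands in for any writer MSAN does not instrument): without the __msan_unpoison_string() call, the length check and copy inside rb_str_new() could be flagged as reading uninitialized bytes.

    char buf[32];
    read_outside_msan(buf, sizeof buf);  /* hypothetical uninstrumented writer */
    VALUE str = rb_str_new_cstr(buf);    /* buf is unpoisoned as a C string first,
                                          * so MSAN does not raise a false positive */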

transient_heap.c

@ -230,11 +230,13 @@ transient_heap_get(void)
static void
reset_block(struct transient_heap_block *block)
{
__msan_allocated_memory(block, sizeof block);
block->info.size = TRANSIENT_HEAP_BLOCK_SIZE - sizeof(struct transient_heap_block_header);
block->info.index = 0;
block->info.objects = 0;
block->info.last_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_LAST;
block->info.next_block = NULL;
__asan_poison_memory_region(&block->buff, sizeof block->buff);
}
static void
@ -379,10 +381,17 @@ rb_transient_heap_alloc(VALUE obj, size_t req_size)
if (header) {
void *ptr;
/* the header is poisoned to prevent buffer overflows, so it must
* be unpoisoned before we write to it... */
unpoison_memory_region(header, sizeof *header, true);
header->size = size;
header->magic = TRANSIENT_HEAP_ALLOC_MAGIC;
header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
header->obj = obj; /* TODO: can we eliminate it? */
/* the header is now filled in; poison it again */
poison_memory_region(header, sizeof *header);
ptr = header + 1;
theap->total_objects++; /* statistics */
@ -395,6 +404,9 @@ rb_transient_heap_alloc(VALUE obj, size_t req_size)
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_alloc: header:%p ptr:%p size:%d obj:%s\n", (void *)header, ptr, (int)size, rb_obj_info(obj));
RB_DEBUG_COUNTER_INC(theap_alloc);
/* ptr is set up; OK to unpoison. */
unpoison_memory_region(ptr, size, true);
return ptr;
}
else {
@ -509,6 +521,7 @@ void
rb_transient_heap_mark(VALUE obj, const void *ptr)
{
struct transient_alloc_header *header = ptr_to_alloc_header(ptr);
unpoison_memory_region(header, sizeof *header, false);
if (header->magic != TRANSIENT_HEAP_ALLOC_MAGIC) rb_bug("rb_transient_heap_mark: wrong header, %s (%p)", rb_obj_info(obj), ptr);
if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_mark: %s (%p)\n", rb_obj_info(obj), ptr);
@ -537,6 +550,7 @@ rb_transient_heap_mark(VALUE obj, const void *ptr)
else {
struct transient_heap* theap = transient_heap_get();
struct transient_heap_block *block = alloc_header_to_block(theap, header);
__asan_unpoison_memory_region(&block->info, sizeof block->info);
header->next_marked_index = block->info.last_marked_index;
block->info.last_marked_index = (int)((char *)header - block->buff);
theap->total_marked_objects++;
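
The transient heap changes above follow one more variant of the same idea: allocation metadata stays poisoned except for the short window in which it is legitimately read or written. A hedged sketch (touch_header() is illustrative, not the commit's code; the types and the magic constant come from the diff):

    static void
    touch_header(struct transient_alloc_header *header)
    {
        unpoison_memory_region(header, sizeof *header, false);  /* open the window */
        if (header->magic != TRANSIENT_HEAP_ALLOC_MAGIC) {
            rb_bug("touch_header: broken header %p", (void *)header);
        }
        poison_memory_region(header, sizeof *header);           /* close it again */
    }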