From 48ff7a9f3e47bffb3e4d067a12ba9b936261caa0 Mon Sep 17 00:00:00 2001
From: Peter Zhu
Date: Mon, 19 Jul 2021 13:52:14 -0400
Subject: [PATCH] [Feature #18045] Remove T_PAYLOAD

This commit removes T_PAYLOAD since the new VWA implementation no
longer requires T_PAYLOAD types.

Co-authored-by: Aaron Patterson
---
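Reviewer note (kept below the scissors line, so `git am` drops it): T_PAYLOAD
packed the length of a multi-slot payload into the user bits of the header
slot's flags word. Below is a minimal, self-contained sketch of that encoding
for reference while reading the deletions; SHIM_FL_USHIFT, SHIM_T_PAYLOAD, and
SHIM_T_MASK are simplified stand-ins for Ruby's FL_USHIFT, RUBY_T_PAYLOAD
(0x17), and RUBY_T_MASK — not the interpreter's definitions.

    /* shim_payload.c -- illustration only; build with: cc shim_payload.c */
    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long SHIM_VALUE;

    #define SHIM_FL_USHIFT 12    /* user bits start above the type/flag bits */
    #define SHIM_T_PAYLOAD 0x17  /* type tag kept in the low bits */
    #define SHIM_T_MASK    0x1f

    struct shim_payload { SHIM_VALUE flags; };  /* one-word header slot */

    /* Pack the slot count into the user bits, preserving the type tag
     * (mirrors what the removed RPAYLOAD_FLAGS_SET did). */
    static void
    payload_flags_set(struct shim_payload *p, unsigned short len)
    {
        p->flags = SHIM_T_PAYLOAD | ((SHIM_VALUE)len << SHIM_FL_USHIFT);
    }

    /* Recover the slot count from the user bits (mirrors RPAYLOAD_LEN). */
    static unsigned short
    payload_len(const struct shim_payload *p)
    {
        return (unsigned short)(p->flags >> SHIM_FL_USHIFT);
    }

    int
    main(void)
    {
        struct shim_payload p;
        payload_flags_set(&p, 5);  /* a payload spanning 5 heap slots */
        assert((p.flags & SHIM_T_MASK) == SHIM_T_PAYLOAD);
        assert(payload_len(&p) == 5);
        printf("len=%u\n", (unsigned)payload_len(&p));
        return 0;
    }

Running the sketch prints "len=5": the length round-trips through the flags
word, which is why every GC phase below had to special-case T_PAYLOAD to skip
or account for the body slots that follow the header.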
 ext/objspace/objspace.c            |   2 -
 gc.c                               | 345 +----------------------------
 include/ruby/internal/value_type.h |   2 -
 internal/gc.h                      |   4 -
 misc/lldb_cruby.py                 |   3 -
 vm_eval.c                          |   1 -
 6 files changed, 2 insertions(+), 355 deletions(-)

diff --git a/ext/objspace/objspace.c b/ext/objspace/objspace.c
index 72d1eb888d..3fa4fd279b 100644
--- a/ext/objspace/objspace.c
+++ b/ext/objspace/objspace.c
@@ -66,7 +66,6 @@ total_i(VALUE v, void *ptr)
       case T_IMEMO:
       case T_ICLASS:
       case T_NODE:
-      case T_PAYLOAD:
       case T_ZOMBIE:
         return;
       default:
@@ -225,7 +224,6 @@ type2sym(enum ruby_value_type i)
         CASE_TYPE(T_ICLASS);
         CASE_TYPE(T_MOVED);
         CASE_TYPE(T_ZOMBIE);
-        CASE_TYPE(T_PAYLOAD);
 #undef CASE_TYPE
       default: rb_bug("type2sym: unknown type (%d)", i);
     }

diff --git a/gc.c b/gc.c
index fa4ab44712..59dcba0a09 100644
--- a/gc.c
+++ b/gc.c
@@ -556,7 +556,6 @@ typedef struct gc_profile_record {
 } gc_profile_record;
 
 #define FL_FROM_FREELIST FL_USER0
-#define FL_FROM_PAYLOAD FL_USER0
 
 struct RMoved {
     VALUE flags;
@@ -570,31 +569,12 @@ struct RMoved {
 #pragma pack(push, 4) /* == SIZEOF_VALUE: magic for reducing sizeof(RVALUE): 24 -> 20 */
 #endif
 
-struct RPayload {
-    VALUE flags;
-};
-#define RPAYLOAD(obj) ((struct RPayload *)obj)
-static unsigned short
-RPAYLOAD_LEN(VALUE obj)
-{
-    unsigned short len = (unsigned short)(RPAYLOAD(obj)->flags >> FL_USHIFT);
-    return len;
-}
-
-static void
-RPAYLOAD_FLAGS_SET(VALUE obj, unsigned short len)
-{
-    // as len is the only thing in the user bits, we can overwrite it every time
-    RPAYLOAD(obj)->flags = T_PAYLOAD | (len << FL_USHIFT);
-}
-
 typedef struct RVALUE {
     union {
         struct {
             VALUE flags;            /* always 0 for freed obj */
             struct RVALUE *next;
         } free;
-        struct RPayload payload;
         struct RMoved  moved;
         struct RBasic  basic;
         struct RObject object;
@@ -1290,36 +1270,6 @@ RVALUE_FLAGS_AGE(VALUE flags)
     return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
 }
 
-#if USE_RVARGC
-static VALUE
-payload_or_self(VALUE obj)
-{
-    struct heap_page *p = GET_HEAP_PAGE(obj);
-    VALUE cur = (VALUE)p->start;
-
-    while (cur != obj && GET_HEAP_PAGE(cur) == p) {
-        VALUE p = cur;
-        void *poisoned = asan_poisoned_object_p((VALUE)p);
-        asan_unpoison_object((VALUE)p, false);
-
-        if (BUILTIN_TYPE(cur) == T_PAYLOAD) {
-            if (cur < obj && obj < cur + RPAYLOAD_LEN(cur) * sizeof(RVALUE)) {
-                return cur;
-            }
-            cur += RPAYLOAD_LEN(cur) * sizeof(RVALUE);
-        }
-        else {
-            cur += sizeof(RVALUE);
-        }
-        if (poisoned) {
-            asan_poison_object((VALUE)p);
-        }
-    }
-
-    return obj;
-}
-#endif
-
 static int
 check_rvalue_consistency_force(const VALUE obj, int terminate)
 {
@@ -1527,18 +1477,6 @@ RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page
 {
     MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
     objspace->rgengc.old_objects++;
-
-#if USE_RVARGC
-    if (BUILTIN_TYPE(obj) == T_PAYLOAD) {
-        int plen = RPAYLOAD_LEN(obj);
-
-        for (int i = 1; i < plen; i++) {
-            VALUE pbody = obj + i * sizeof(RVALUE);
-            MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(pbody), pbody);
-        }
-        objspace->rgengc.old_objects += plen - 1;
-    }
-#endif
     rb_transient_heap_promote(obj);
 
 #if RGENGC_PROFILE >= 2
@@ -1628,11 +1566,6 @@ RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
 
     if (RVALUE_MARKED(obj)) {
         objspace->rgengc.old_objects--;
-#if USE_RVARGC
-        if (BUILTIN_TYPE(obj) == T_PAYLOAD) {
-            objspace->rgengc.old_objects -= RPAYLOAD_LEN(obj) - 1;
-        }
-#endif
     }
 
     check_rvalue_consistency(obj);
@@ -2255,136 +2188,11 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
     return obj;
 }
 
-static unsigned long
-rvargc_slot_count(size_t size)
-{
-    // roomof == ceiling division, so we don't have to do div then mod
-    return roomof(size + sizeof(struct RPayload), sizeof(RVALUE));
-}
-
-#if USE_RVARGC
-static RVALUE *
-rvargc_find_contiguous_slots(int slots, RVALUE *freelist)
-{
-    RVALUE *cursor = freelist;
-    RVALUE *previous_region = NULL;
-
-    while (cursor) {
-        int i;
-        RVALUE *search = cursor;
-        for (i = 0; i < (slots - 1); i++) {
-
-            // Peek ahead to see if the region is contiguous
-            if (search->as.free.next == (search - 1)) {
-                search = search->as.free.next;
-            }
-            else {
-                // Next slot is not contiguous
-                if (search->as.free.next) {
-                    cursor = search->as.free.next;
-                    previous_region = search;
-
-                    break;
-                }
-                else {
-                    // Hit the end of the free list
-                    return NULL;
-                }
-            }
-        }
-
-        if (i == slots - 1) {
-            if (previous_region) {
-                previous_region->as.free.next = search->as.free.next;
-                search->as.free.next = freelist;
-            }
-            return search;
-        }
-    }
-    rb_bug("rvargc_find_contiguous_slots: unreachable");
-}
-#endif
-
 static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
 static struct heap_page * heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap);
 static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page);
 
 #if USE_RVARGC
-static inline void *
-rvargc_find_region(size_t size, rb_ractor_t *cr, RVALUE *freelist)
-{
-    // maintain master behaviour when we only need one slot
-    if (size == sizeof(RVALUE))
-        return freelist;
-
-    if (!freelist) return freelist;
-
-    rb_objspace_t *objspace = &rb_objspace;
-    int slots = (int)rvargc_slot_count(size);
-    RVALUE * p = rvargc_find_contiguous_slots(slots, freelist);
-
-    // We found a contiguous space on the freelist stored in the ractor cache
-    if (p) {
-        struct heap_page *page = GET_HEAP_PAGE(p);
-
-        page->free_slots -= slots;
-        asan_unpoison_memory_region(p, sizeof(RVALUE) * slots, false);
-        return p;
-    }
-    else {
-        struct heap_page *search_page;
-        heap_allocatable_pages_set(objspace, heap_allocatable_pages + 1);
-
-        while (!p) {
-            // search_page is the page we're going to search for contiguous slots
-            search_page = heap_next_freepage(objspace, heap_eden);
-            p = rvargc_find_contiguous_slots(slots, search_page->freelist);
-
-            if (p) {
-                // Remove the region from the freelist
-                search_page->freelist = p->as.free.next;
-                search_page->free_slots -= slots;
-
-                // If we started sweeping, the object cache can be removed
-                // from the ractor. Set it to the page we found
-                if (!cr->newobj_cache.using_page) {
-                    ractor_set_cache(cr, search_page);
-                }
-                // Otherwise we need to add this page back to the list of free
-                // pages.
-                else {
-                    // make this pointer point at the Ractor's freelist
-                    p->as.free.next = freelist;
-                }
-
-                asan_unpoison_memory_region(p, sizeof(RVALUE) * slots, false);
-                return p;
-            }
-        }
-    }
-    return NULL;
-}
-#endif
-
-int
-rb_slot_size(void)
-{
-    return sizeof(RVALUE);
-}
-
-VALUE
-rb_rvargc_payload_init(VALUE obj, size_t size)
-{
-    rb_objspace_t * objspace = &rb_objspace;
-    struct RPayload *ph = (struct RPayload *)obj;
-    memset(ph, 0, rvargc_slot_count(size) * sizeof(RVALUE));
-
-    RPAYLOAD_FLAGS_SET((VALUE)ph, rvargc_slot_count(size));
-    objspace->total_allocated_objects += rvargc_slot_count(size);
-
-    return (VALUE)ph;
-}
-
 void *
 rb_rvargc_payload_data_ptr(VALUE phead)
 {
@@ -2394,11 +2202,7 @@ rb_rvargc_payload_data_ptr(VALUE phead)
 static inline VALUE
 ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size)
 {
-#if USE_RVARGC
-    RVALUE *p = rvargc_find_region(size, cr, cr->newobj_cache.freelist);
-#else
     RVALUE *p = cr->newobj_cache.freelist;
-#endif
 
     if (p) {
         VALUE obj = (VALUE)p;
@@ -2496,10 +2300,6 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *
     }
     GC_ASSERT(obj != 0);
     newobj_init(klass, flags, wb_protected, objspace, obj);
-#if USE_RVARGC
-    if (alloc_size > sizeof(RVALUE))
-        rb_rvargc_payload_init(obj + sizeof(RVALUE), alloc_size - sizeof(RVALUE));
-#endif
 
     gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));
 }
@@ -2550,10 +2350,6 @@ newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t a
           (obj = ractor_cached_free_region(objspace, cr, alloc_size)) != Qfalse)) {
 
         newobj_init(klass, flags, wb_protected, objspace, obj);
-#if USE_RVARGC
-        if (alloc_size > sizeof(RVALUE))
-            rb_rvargc_payload_init(obj + sizeof(RVALUE), alloc_size - sizeof(RVALUE));
-#endif
     }
     else {
         RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
@@ -3554,29 +3350,12 @@ objspace_each_objects_try(VALUE arg)
         while (cursor_end < pend) {
             int payload_len = 0;
 
-#if USE_RVARGC
-            while (cursor_end < pend && BUILTIN_TYPE((VALUE)cursor_end) != T_PAYLOAD) {
-                cursor_end++;
-            }
-
-            //Make sure the Payload header slot is yielded
-            if (cursor_end < pend && BUILTIN_TYPE((VALUE)cursor_end) == T_PAYLOAD) {
-                payload_len = RPAYLOAD_LEN((VALUE)cursor_end);
-                cursor_end++;
-            }
-#else
             cursor_end = pend;
-#endif
 
             if ((*data->callback)(pstart, cursor_end, sizeof(RVALUE), data->data)) {
                 break;
            }
 
-            // Move the cursor over the rest of the payload body
-            if (payload_len) {
-                cursor_end += (payload_len - 1);
-                pstart = cursor_end;
-            }
         }
 
         page = list_next(&heap_eden->pages, page, page_node);
@@ -3701,7 +3480,6 @@ internal_object_p(VALUE obj)
           case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
-          case T_PAYLOAD:
            break;
          case T_CLASS:
            if (!p->as.basic.klass) break;
@@ -4680,7 +4458,6 @@ obj_memsize_of(VALUE obj, int use_all_types)
 
       case T_ZOMBIE:
       case T_MOVED:
-      case T_PAYLOAD:
         break;
 
      default:
@@ -4738,7 +4515,6 @@ type_sym(size_t type)
             COUNT_TYPE(T_ICLASS);
             COUNT_TYPE(T_ZOMBIE);
             COUNT_TYPE(T_MOVED);
-            COUNT_TYPE(T_PAYLOAD);
 #undef COUNT_TYPE
           default:              return SIZET2NUM(type); break;
         }
@@ -4811,13 +4587,6 @@ count_objects(int argc, VALUE *argv, VALUE os)
             void *poisoned = asan_poisoned_object_p(vp);
             asan_unpoison_object(vp, false);
 
-#if USE_RVARGC
-            if (RB_TYPE_P(vp, T_PAYLOAD)) {
-                stride = RPAYLOAD_LEN(vp);
-                counts[BUILTIN_TYPE(vp)] += RPAYLOAD_LEN(vp);
-            }
-            else
-#endif
             if (p->as.basic.flags) {
                 counts[BUILTIN_TYPE(vp)]++;
             }
@@ -5349,35 +5118,6 @@ gc_plane_sweep(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, bits_t bits
                 }
                 break;
 
-                /* minor cases */
-              case T_PAYLOAD:
-                {
-                    int plen = RPAYLOAD_LEN(vp);
-                    ctx->freed_slots += plen;
-
-                    (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)vp, sizeof(RVALUE));
-                    heap_page_add_freeobj(objspace, sweep_page, vp);
-
-                    // This loop causes slots *following this slot* to be marked as
-                    // T_NONE. On the next iteration of this sweep loop, the T_NONE slots
-                    // can be double counted. Mutating the bit plane is difficult because it's
-                    // copied to a local variable. So we would need special logic to mutate
-                    // local bitmap plane (stored in `bitset`) plane, versus T_PAYLOAD objects that span
-                    // bitplanes. (Imagine a T_PAYLOAD at positions 0-3 versus positions 62-65,
-                    // their mark bits would be on different planes. We would have to mutate only `bitset`
-                    // for the first case, but `bitset` and `bits[i+1]` for the second
-                    for (int i = 1; i < plen; i++) {
-                        VALUE pbody = vp + i * sizeof(RVALUE);
-
-                        (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)pbody, sizeof(RVALUE));
-                        heap_page_add_freeobj(objspace, sweep_page, pbody);
-
-                        // Lets set a bit on the object so that the T_NONE branch
-                        // will know to avoid double counting this slot.
-                        FL_SET(pbody, FL_FROM_PAYLOAD);
-                    }
-                }
-                break;
               case T_MOVED:
                 if (objspace->flags.during_compacting) {
                     /* The sweep cursor shouldn't have made it to any
@@ -5405,14 +5145,7 @@ gc_plane_sweep(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, bits_t bits
                     MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
                 }
                 else {
-                    // This slot came from a T_PAYLOAD object and
-                    // has already been counted
-                    if (FL_TEST(vp, FL_FROM_PAYLOAD)) {
-                        FL_UNSET(vp, FL_FROM_PAYLOAD);
-                    }
-                    else {
-                        ctx->empty_slots++; /* already freed */
-                    }
+                    ctx->empty_slots++; /* already freed */
                 }
                 break;
             }
@@ -5974,7 +5707,6 @@ push_mark_stack(mark_stack_t *stack, VALUE data)
       case T_TRUE:
       case T_FALSE:
       case T_SYMBOL:
-      case T_PAYLOAD:
       case T_IMEMO:
       case T_ICLASS:
         if (stack->index == stack->limit) {
@@ -6512,18 +6244,12 @@ rb_mark_tbl_no_pin(st_table *tbl)
     mark_tbl_no_pin(&rb_objspace, tbl);
 }
 
-static void gc_mark_payload(rb_objspace_t *objspace, VALUE obj);
-
 static void
 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
 {
     (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
 
     if (is_pointer_to_heap(objspace, (void *)obj)) {
-#if USE_RVARGC
-        obj = payload_or_self(obj);
-#endif
-
         void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
         asan_unpoison_object(obj, false);
@@ -6654,17 +6380,6 @@ gc_aging(rb_objspace_t *objspace, VALUE obj)
             GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
             RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
         }
-
-#if USE_RVARGC
-        if (RVALUE_UNCOLLECTIBLE(obj) && BUILTIN_TYPE(obj) == T_PAYLOAD) {
-            int plen = RPAYLOAD_LEN(obj);
-
-            for (int i = 1; i < plen; i++) {
-                VALUE pbody = obj + i * sizeof(RVALUE);
-                MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(pbody), pbody);
-            }
-        }
-#endif
     }
 
     check_rvalue_consistency(obj);
@@ -6846,22 +6561,6 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
     }
 }
 
-static inline void
-gc_mark_payload(rb_objspace_t *objspace, VALUE obj)
-{
-#if USE_RVARGC
-    GC_ASSERT(BUILTIN_TYPE(obj) == T_PAYLOAD);
-    // Mark payload head here
-    gc_mark_and_pin(objspace, obj);
-
-    for (int i = 1 ; i < RPAYLOAD_LEN(obj); i++) {
-        VALUE p = obj + i * sizeof(RVALUE);
-        MARK_IN_BITMAP(GET_HEAP_MARK_BITS(p), p);
-        MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(p), p);
-    }
-#endif
-}
-
 static void
 gc_mark_children(rb_objspace_t *objspace, VALUE obj)
 {
@@ -6897,14 +6596,9 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
         break;
     }
 
-    if (BUILTIN_TYPE(obj) != T_PAYLOAD) {
-        gc_mark(objspace, any->as.basic.klass);
-    }
+    gc_mark(objspace, any->as.basic.klass);
 
     switch (BUILTIN_TYPE(obj)) {
-      case T_PAYLOAD:
-        gc_mark_payload(objspace, obj);
-        break;
       case T_CLASS:
       case T_MODULE:
         if (RCLASS_SUPER(obj)) {
@@ -6916,7 +6610,6 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
         cc_table_mark(objspace, obj);
         mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
         mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
-        gc_mark_payload(objspace, (VALUE)((uintptr_t)RCLASS(obj)->ptr - sizeof(struct RPayload)));
         break;
 
       case T_ICLASS:
@@ -6929,7 +6622,6 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
         if (!RCLASS_EXT(obj)) break;
         mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
         cc_table_mark(objspace, obj);
-        gc_mark_payload(objspace, (VALUE)((uintptr_t)RCLASS(obj)->ptr - sizeof(struct RPayload)));
         break;
 
       case T_ARRAY:
@@ -7563,14 +7255,6 @@ verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
                 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
             }
         }
-
-        /* make sure we have counted the payload body slots */
-        if (BUILTIN_TYPE(obj) == T_PAYLOAD) {
-            if (RVALUE_OLD_P(obj)) {
-                data->old_object_count += RPAYLOAD_LEN(obj) - 1;
-            }
-            data->live_object_count += RPAYLOAD_LEN(obj) - 1;
-        }
     }
     else {
         if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
@@ -7597,28 +7281,13 @@ gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
     int free_objects = 0;
     int zombie_objects = 0;
     int stride = 1;
-#if USE_RVARGC
-    int default_stride = 1;
-#endif
 
     for (i=0; i<page->total_slots; i+=stride) {
         VALUE val = (VALUE)&page->start[i];
         void *poisoned = asan_poisoned_object_p(val);
         asan_unpoison_object(val, false);
 
-#if USE_RVARGC
-        if (BUILTIN_TYPE(val) == T_PAYLOAD) {
-            stride = RPAYLOAD_LEN(val);
-        }
-        else {
-            stride = default_stride;
-        }
-#endif
-
         if (RBASIC(val) == 0) free_objects++;
-#if USE_RVARGC
-        if (BUILTIN_TYPE(val) == T_PAYLOAD) stride = RPAYLOAD_LEN(val);
-#endif
         if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
         if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
             has_remembered_shady = TRUE;
@@ -10054,9 +9723,6 @@ gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace,
           case T_MOVED:
           case T_ZOMBIE:
             break;
-          case T_PAYLOAD:
-            v += (stride * (RPAYLOAD_LEN(v) - 1));
-            break;
           default:
             if (RVALUE_WB_UNPROTECTED(v)) {
                 page->flags.has_uncollectible_shady_objects = TRUE;
@@ -12941,7 +12607,6 @@ type_name(int type, VALUE obj)
             TYPE_NAME(T_ICLASS);
             TYPE_NAME(T_MOVED);
             TYPE_NAME(T_ZOMBIE);
-            TYPE_NAME(T_PAYLOAD);
           case T_DATA:
             if (obj && rb_objspace_data_type_name(obj)) {
                 return rb_objspace_data_type_name(obj);
@@ -13055,9 +12720,6 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
         if (internal_object_p(obj)) {
             /* ignore */
         }
-        else if (type == T_PAYLOAD) {
-            /* ignore */
-        }
         else if (RBASIC(obj)->klass == 0) {
             APPENDF((BUFF_ARGS, "(temporary internal)"));
         }
@@ -13075,9 +12737,6 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
 #endif
 
         switch (type) {
-          case T_PAYLOAD:
-            APPENDF((BUFF_ARGS, "len: %i", RPAYLOAD_LEN(obj)));
-            break;
           case T_NODE:
             UNEXPECTED_NODE(rb_raw_obj_info);
             break;

diff --git a/include/ruby/internal/value_type.h b/include/ruby/internal/value_type.h
index 5642686c7b..e449cfab37 100644
--- a/include/ruby/internal/value_type.h
+++ b/include/ruby/internal/value_type.h
@@ -81,7 +81,6 @@
 #define T_TRUE         RUBY_T_TRUE
 #define T_UNDEF        RUBY_T_UNDEF
 #define T_ZOMBIE       RUBY_T_ZOMBIE
-#define T_PAYLOAD      RUBY_T_PAYLOAD
 #define BUILTIN_TYPE   RB_BUILTIN_TYPE
 #define DYNAMIC_SYM_P  RB_DYNAMIC_SYM_P
@@ -134,7 +133,6 @@ ruby_value_type {
     RUBY_T_SYMBOL  = 0x14, /**< @see struct ::RSymbol */
     RUBY_T_FIXNUM  = 0x15, /**< Integers formerly known as Fixnums. */
     RUBY_T_UNDEF   = 0x16, /**< @see ::RUBY_Qundef */
-    RUBY_T_PAYLOAD = 0x17, /**< @see ::RPayload */
 
     RUBY_T_IMEMO   = 0x1a, /**< @see struct ::RIMemo */
     RUBY_T_NODE    = 0x1b, /**< @see struct ::RNode */

diff --git a/internal/gc.h b/internal/gc.h
index 42d82aac76..7d1efebfca 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -71,8 +71,6 @@ struct rb_objspace; /* in vm_core.h */
     rb_obj_write((VALUE)(a), UNALIGNED_MEMBER_ACCESS((VALUE *)(slot)), \
                  (VALUE)(b), __FILE__, __LINE__)
 
-#define RVARGC_PAYLOAD_INIT(obj, size) (void *)rb_rvargc_payload_init((VALUE)obj, (size_t)size)
-
 typedef struct ractor_newobj_cache {
     struct RVALUE *freelist;
     struct heap_page *using_page;
@@ -110,8 +108,6 @@ const char *rb_objspace_data_type_name(VALUE obj);
 VALUE rb_wb_protected_newobj_of(VALUE, VALUE, size_t);
 VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, size_t);
 VALUE rb_ec_wb_protected_newobj_of(struct rb_execution_context_struct *ec, VALUE klass, VALUE flags, size_t);
-VALUE rb_rvargc_payload_init(VALUE obj, size_t size);
-void * rb_rvargc_payload_data_ptr(VALUE obj);
 size_t rb_obj_memsize_of(VALUE);
 void rb_gc_verify_internal_consistency(void);
 size_t rb_obj_gc_flags(VALUE, ID[], size_t);

diff --git a/misc/lldb_cruby.py b/misc/lldb_cruby.py
index 3f0479b51f..1dbdc03eee 100755
--- a/misc/lldb_cruby.py
+++ b/misc/lldb_cruby.py
@@ -330,9 +330,6 @@ def lldb_inspect(debugger, target, result, val):
     elif flType == RUBY_T_HASH:
         result.write("T_HASH: %s" % flaginfo)
         append_command_output(debugger, "p *(struct RHash *) %0#x" % val.GetValueAsUnsigned(), result)
-    elif flType == RUBY_T_PAYLOAD:
-        result.write("T_PAYLOAD: %s" % flaginfo)
-        append_command_output(debugger, "p *(struct RPayload *) %0#x" % val.GetValueAsUnsigned(), result)
     elif flType == RUBY_T_BIGNUM:
         tRBignum = target.FindFirstType("struct RBignum").GetPointerType()
         val = val.Cast(tRBignum)

diff --git a/vm_eval.c b/vm_eval.c
index 7ce9f157e6..8a0166e1ee 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -731,7 +731,6 @@ rb_type_str(enum ruby_value_type type)
         case type_case(T_ICLASS);
         case type_case(T_ZOMBIE);
         case type_case(T_MOVED);
-        case type_case(T_PAYLOAD);
       case T_MASK: break;
     }
 #undef type_case