Mirror of https://github.com/github/ruby.git
[Feature #18045] Implement size classes for GC
This commit implements size classes in the GC for the Variable Width Allocation feature. Unless the `USE_RVARGC` compile flag is set, only a single size class is created, maintaining current behaviour. See the Redmine ticket for more details.

Co-authored-by: Aaron Patterson <tenderlove@ruby-lang.org>
Parent: 48ff7a9f3e
Commit: b2e2cf2ded
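For context, the size-class idea the commit introduces: rather than every heap slot having one fixed size, the heap is split into several size pools, each serving a single slot size, and an allocation is routed to the smallest pool whose slots can hold it. Below is a minimal sketch of that routing, assuming a handful of made-up pool sizes; the real pool count and slot sizes are chosen by the GC, not by this sketch.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical slot sizes, for illustration only. */
static const size_t slot_sizes[] = { 40, 80, 160, 320, 640 };
#define SIZE_POOL_COUNT (sizeof(slot_sizes) / sizeof(slot_sizes[0]))

/* Return the index of the smallest size pool whose slot can hold
 * `payload_size` bytes, or -1 if no pool is large enough. */
static int
size_pool_index_for(size_t payload_size)
{
    for (size_t i = 0; i < SIZE_POOL_COUNT; i++) {
        if (payload_size <= slot_sizes[i]) return (int)i;
    }
    return -1; /* a real allocator would fall back to separately malloc'd storage */
}

int
main(void)
{
    size_t request = 100;                     /* e.g. an object plus its payload */
    int pool = size_pool_index_for(request);
    printf("%zu-byte object -> pool %d (slot size %zu)\n",
           request, pool, pool >= 0 ? slot_sizes[pool] : 0);
    return 0;
}

With only one pool (the non-`USE_RVARGC` build), every request falls into the same class, which is how the commit keeps the current single-size behaviour.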
class.c (3 changes)
@@ -185,8 +185,7 @@ class_alloc(VALUE flags, VALUE klass)
     RVARGC_NEWOBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0), payload_size);
 
 #if USE_RVARGC
-    obj->ptr = (rb_classext_t *)rb_rvargc_payload_data_ptr((VALUE)obj + rb_slot_size());
-    RB_OBJ_WRITTEN(obj, Qundef, (VALUE)obj + rb_slot_size());
+    obj->ptr = (rb_classext_t *)rb_gc_rvargc_object_data((VALUE)obj);
 #else
     obj->ptr = ZALLOC(rb_classext_t);
 #endif
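The class.c hunk above replaces hand-rolled pointer arithmetic, (VALUE)obj + rb_slot_size(), with the new rb_gc_rvargc_object_data() helper, so the call site no longer hard-codes where the payload sits relative to the object's slot. A toy sketch of that refactor, assuming a 40-byte slot and using hypothetical toy_-prefixed names in place of the real GC functions:

#include <stdint.h>
#include <stdio.h>

#define TOY_SLOT_SIZE 40   /* assumed base slot size; illustrative only */

/* Hypothetical stand-in for rb_gc_rvargc_object_data(): the real helper is
 * defined in gc.c and knows the GC's actual layout. Here we simply assume the
 * payload starts one slot past the object, which is what the removed
 * `(VALUE)obj + rb_slot_size()` arithmetic computed by hand. */
static void *
toy_rvargc_object_data(uintptr_t obj)
{
    return (void *)(obj + TOY_SLOT_SIZE);
}

int
main(void)
{
    unsigned char heap[TOY_SLOT_SIZE * 2] = {0};  /* an object slot plus its payload slot */
    uintptr_t obj = (uintptr_t)heap;

    void *old_style = (void *)(obj + TOY_SLOT_SIZE);   /* arithmetic at the call site */
    void *new_style = toy_rvargc_object_data(obj);     /* layout hidden behind a helper */

    printf("same address? %s\n", old_style == new_style ? "yes" : "no");
    return 0;
}

Centralising the offset computation in one helper lets the GC change how payloads are placed without touching every caller.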
gc.c (1077 changes)
Diff not shown because of its large size.
internal/gc.h

@@ -101,6 +101,7 @@ static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, si
 static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
 VALUE rb_class_allocate_instance(VALUE klass);
 void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
+void *rb_gc_rvargc_object_data(VALUE obj);
 
 RUBY_SYMBOL_EXPORT_BEGIN
 /* gc.c (export) */
@@ -116,7 +117,6 @@ void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
 void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
 void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
 void ruby_sized_xfree(void *x, size_t size);
-int rb_slot_size(void);
 RUBY_SYMBOL_EXPORT_END
 
 MJIT_SYMBOL_EXPORT_BEGIN
misc/lldb_cruby.py

@@ -533,14 +533,15 @@ class HeapPageIter:
         self.target = target
         self.start = page.GetChildMemberWithName('start').GetValueAsUnsigned();
         self.num_slots = page.GetChildMemberWithName('total_slots').unsigned
+        self.slot_size = page.GetChildMemberWithName('size_pool').GetChildMemberWithName('slot_size').unsigned
         self.counter = 0
         self.tRBasic = target.FindFirstType("struct RBasic")
         self.tRValue = target.FindFirstType("struct RVALUE")
 
     def is_valid(self):
         heap_page_header_size = self.target.FindFirstType("struct heap_page_header").GetByteSize()
-        rvalue_size = self.tRValue.GetByteSize()
-        heap_page_obj_limit = int((HEAP_PAGE_SIZE - heap_page_header_size) / rvalue_size)
+        rvalue_size = self.slot_size
+        heap_page_obj_limit = int((HEAP_PAGE_SIZE - heap_page_header_size) / self.slot_size)
 
         return (heap_page_obj_limit - 1) <= self.num_slots <= heap_page_obj_limit
 
@@ -549,7 +550,7 @@ class HeapPageIter:
 
     def __next__(self):
         if self.counter < self.num_slots:
-            obj_addr_i = self.start + (self.counter * self.tRValue.GetByteSize())
+            obj_addr_i = self.start + (self.counter * self.slot_size)
             obj_addr = lldb.SBAddress(obj_addr_i, self.target)
             slot_info = (self.counter, obj_addr_i, self.target.CreateValueFromAddress("object", obj_addr, self.tRBasic))
             self.counter += 1
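The lldb helper now steps through a heap page using that page's own slot_size, read from its size pool, instead of assuming every slot is sizeof(struct RVALUE). The same walk, sketched in C with a hypothetical toy_heap_page struct whose fields mirror what the script reads (start, total_slots, slot_size) but which is not the GC's real heap_page layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical page descriptor, for illustration only. */
struct toy_heap_page {
    uintptr_t start;
    int total_slots;
    unsigned int slot_size;
};

/* Walk every slot in a page the way HeapPageIter.__next__ now does: each slot
 * begins slot_size bytes after the previous one, so pages belonging to
 * different size pools are stepped through correctly. */
static void
walk_page(const struct toy_heap_page *page)
{
    for (int i = 0; i < page->total_slots; i++) {
        uintptr_t slot_addr = page->start + (uintptr_t)i * page->slot_size;
        printf("slot %d at 0x%lx\n", i, (unsigned long)slot_addr);
    }
}

int
main(void)
{
    struct toy_heap_page page = { .start = 0x10000, .total_slots = 3, .slot_size = 80 };
    walk_page(&page);
    return 0;
}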