Mirror of https://github.com/github/ruby.git
Disable auto compaction on platforms that can't support it
Both explicit compaction routines (gc_compact and the "verify references" form) need to clear the heap before executing compaction; otherwise some objects on the heap may not be alive and the read barrier would still be needed. The heap must contain only *live* objects if we want to disable the read barrier during explicit compaction. The previous commit was missing the "clear the heap" phase in the "verify references" explicit compaction function.

Fixes [Bug #17306]
Parent: 7aaf6676c4
Commit: c32218de1b
gc.c (65 lines changed)
@@ -682,7 +682,7 @@ typedef struct rb_objspace {
         unsigned int dont_gc : 1;
         unsigned int dont_incremental : 1;
         unsigned int during_gc : 1;
-        unsigned int during_compacting : 1;
+        unsigned int during_compacting : 2;
         unsigned int gc_stressful: 1;
         unsigned int has_hook: 1;
         unsigned int during_minor_gc : 1;
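Widening during_compacting from one bit to two lets the collector record not just that a compaction is running but also how it was triggered: the hunks below set bit 1 (via << 1) when compaction was requested explicitly through GC.compact, set bit 0 (via |= TRUE) when auto-compaction turns it on, and test bit 1 with >> 1 to decide whether the read barrier can be skipped. A minimal compilable sketch of that encoding, using helper names that are not part of gc.c:

#include <assert.h>
#include <stdio.h>

/* Two-bit field mirroring objspace->flags.during_compacting. */
struct flags { unsigned int during_compacting : 2; };

/* gc_start(): record an explicit request (GC.compact) in bit 1. */
static void set_explicit(struct flags *f, int requested)
{
    f->during_compacting = (unsigned int)(!!requested << 1);  /* 0b10 or 0b00 */
}

/* gc_marks_start(): auto-compaction sets bit 0 without clearing bit 1. */
static void set_auto(struct flags *f)
{
    f->during_compacting |= 1;
}

/* lock_page_body()/unlock_page_body()/gc_compact_start(): the read barrier
 * is only needed when the compaction was NOT explicitly requested. */
static int is_explicit(const struct flags *f)
{
    return f->during_compacting >> 1;
}

int main(void)
{
    struct flags f = {0};
    set_explicit(&f, 1);                 /* GC.compact              */
    set_auto(&f);                        /* autocompact also active */
    assert(f.during_compacting == 3);
    assert(is_explicit(&f));             /* skip the read barrier   */
    printf("during_compacting = %u\n", f.during_compacting);
    return 0;
}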
@@ -3090,6 +3090,17 @@ Init_heap(void)
 {
     rb_objspace_t *objspace = &rb_objspace;

+#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
+    /* If Ruby's heap pages are not a multiple of the system page size, we
+     * cannot use mprotect for the read barrier, so we must disable automatic
+     * compaction. */
+    int pagesize;
+    pagesize = (int)sysconf(_SC_PAGE_SIZE);
+    if ((HEAP_PAGE_SIZE % pagesize) != 0) {
+        ruby_enable_autocompact = 0;
+    }
+#endif
+
     objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
     objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
     objspace->obj_to_id_tbl = st_init_numtable();
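This check is the core of the commit. The read barrier used during auto-compaction works by mprotect()-ing individual GC heap pages, and mprotect can only change protection in whole OS pages, so Ruby's heap page size must be an exact multiple of the OS page size. On kernels configured with large pages (for example 64 KiB pages on some ppc64le and arm64 systems) that is not the case, and auto-compaction has to be disabled. A standalone sketch of the same test, with RUBY_HEAP_PAGE_SIZE as an illustrative stand-in rather than the real HEAP_PAGE_SIZE constant:

#include <stdio.h>
#include <unistd.h>

#define RUBY_HEAP_PAGE_SIZE (16 * 1024)   /* illustrative value only */

int main(void)
{
    long os_page = sysconf(_SC_PAGE_SIZE);

    if (os_page <= 0 || (RUBY_HEAP_PAGE_SIZE % os_page) != 0) {
        /* e.g. a 64 KiB kernel page: protecting one 16 KiB GC page would
         * inevitably cover memory belonging to its neighbours. */
        printf("auto compaction unsupported (OS page size %ld)\n", os_page);
    }
    else {
        printf("auto compaction supported (OS page size %ld)\n", os_page);
    }
    return 0;
}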
@@ -4389,6 +4400,11 @@ static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
 static void
 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
 #if defined(_WIN32)
     DWORD old_protect;

@@ -4405,6 +4421,11 @@ lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 static void
 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
 #if defined(_WIN32)
     DWORD old_protect;

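For context on what the early returns above skip: during auto-compaction the pages containing T_MOVED slots are protected, and a fault handler installed by install_handlers() catches any stray access, which is the SEGV handler the later hunks refer to. The toy program below only sketches that mprotect plus signal-handler mechanism under the assumption of a POSIX platform; none of its names come from gc.c, and a real implementation has to be far more careful inside the handler.

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long page_size;

/* Fault handler: unprotect the touched page so the interrupted load is
 * retried and succeeds (a GC would also record the page for fix-ups). */
static void on_fault(int sig, siginfo_t *info, void *ctx)
{
    (void)sig; (void)ctx;
    void *start = (void *)((uintptr_t)info->si_addr & ~((uintptr_t)page_size - 1));
    mprotect(start, (size_t)page_size, PROT_READ | PROT_WRITE);
}

int main(void)
{
    page_size = sysconf(_SC_PAGE_SIZE);
    char *page = mmap(NULL, (size_t)page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    page[0] = 42;

    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = on_fault;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);
    sigaction(SIGBUS, &sa, NULL);   /* some platforms report SIGBUS instead */

    mprotect(page, (size_t)page_size, PROT_NONE);               /* "lock" the page */
    printf("read hit the barrier and recovered: %d\n", page[0]);
    mprotect(page, (size_t)page_size, PROT_READ | PROT_WRITE);  /* "unlock" again  */

    munmap(page, (size_t)page_size);
    return 0;
}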
@@ -4624,8 +4645,12 @@ gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     GC_ASSERT(heap->sweeping_page == heap->compact_cursor);

-    gc_unprotect_pages(objspace, heap);
-    uninstall_handlers();
+    /* If this is an explicit compaction (GC.compact), no read barrier was set
+     * so we don't need to unprotect pages or uninstall the SEGV handler */
+    if (!(objspace->flags.during_compacting >> 1)) {
+        gc_unprotect_pages(objspace, heap);
+        uninstall_handlers();
+    }

     /* The mutator is allowed to run during incremental sweeping. T_MOVED
      * objects can get pushed on the stack and when the compaction process
@@ -5134,6 +5159,12 @@ gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
     memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
     memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));

+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
+
     /* Set up read barrier for pages containing MOVED objects */
     install_handlers();
 }
@@ -7030,7 +7061,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 #endif
         objspace->flags.during_minor_gc = FALSE;
         if (ruby_enable_autocompact) {
-            objspace->flags.during_compacting = TRUE;
+            objspace->flags.during_compacting |= TRUE;
         }
         objspace->profile.major_gc_count++;
         objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
@@ -8057,7 +8088,9 @@ gc_start(rb_objspace_t *objspace, int reason)

     /* reason may be clobbered, later, so keep set immediate_sweep here */
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
-    objspace->flags.during_compacting = !!((unsigned)reason & GPR_FLAG_COMPACT);
+
+    /* Explicitly enable compaction (GC.compact) */
+    objspace->flags.during_compacting = (!!((unsigned)reason & GPR_FLAG_COMPACT) << 1);

     if (!heap_allocated_pages) return FALSE; /* heap is not ready */
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -9247,11 +9280,27 @@ heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
     return 0;
 }

+static VALUE
+gc_compact(rb_execution_context_t *ec, VALUE self)
+{
+    /* Clear the heap. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
+
+    /* At this point, all references are live and the mutator is not allowed
+     * to run, so we don't need a read barrier. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
+
+    return gc_compact_stats(ec, self);
+}
+
 static VALUE
 gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
 {
     rb_objspace_t *objspace = &rb_objspace;

+    /* Clear the heap. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
+
     RB_VM_LOCK_ENTER();
     {
         gc_rest(objspace);
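The new gc_compact() above is the two-phase flow the commit message describes: first a full GC with no compaction, which leaves only live objects on the heap, then a second full GC that actually moves objects. Because the mutator does not run in between, nothing can read a dead slot during the move phase, so the read barrier and all of its mprotect/SEGV machinery can be skipped. The same "clear the heap" call is what was missing from gc_verify_compaction_references. A compilable restatement of that control flow, where full_gc() is an invented stand-in for the gc_start_internal(ec, self, ...) calls and the four booleans are assumed to mean full_mark, immediate_mark, immediate_sweep and compact:

#include <stdio.h>

/* Invented stand-in for gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, compact). */
static void full_gc(int compact)
{
    printf("full mark + immediate sweep, compact=%d\n", compact);
}

static void explicit_compact(void)
{
    /* Phase 1: clear the heap.  After a full mark and immediate sweep only
     * live objects remain. */
    full_gc(0);

    /* Phase 2: compact.  The explicit bit of during_compacting is set, so
     * lock_page_body()/unlock_page_body() return early and no read barrier
     * is installed while objects move. */
    full_gc(1);
}

int main(void)
{
    explicit_compact();
    return 0;
}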
@@ -9865,6 +9914,16 @@ gc_disable(rb_execution_context_t *ec, VALUE _)
 static VALUE
 gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
 {
+#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
+    /* If Ruby's heap pages are not a multiple of the system page size, we
+     * cannot use mprotect for the read barrier, so we must disable automatic
+     * compaction. */
+    int pagesize;
+    pagesize = (int)sysconf(_SC_PAGE_SIZE);
+    if ((HEAP_PAGE_SIZE % pagesize) != 0) {
+        rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
+    }
+#endif
     ruby_enable_autocompact = RTEST(v);
     return v;
 }
gc.rb (3 lines changed)
@@ -199,8 +199,7 @@ module GC
   end

   def self.compact
-    Primitive.gc_start_internal true, true, true, true
-    Primitive.gc_compact_stats
+    Primitive.gc_compact
   end

   # call-seq:
test/ruby/test_gc_compact.rb
@@ -1,8 +1,15 @@
 # frozen_string_literal: true
 require 'test/unit'
 require 'fiddle'
+require 'etc'

 class TestGCCompact < Test::Unit::TestCase
+  class AutoCompact < Test::Unit::TestCase
+    def setup
+      skip "autocompact not supported on this platform" unless supports_auto_compact?
+      super
+    end
+
   def test_enable_autocompact
     before = GC.auto_compact
     GC.auto_compact = true
@@ -53,6 +60,25 @@ class TestGCCompact < Test::Unit::TestCase
     GC.auto_compact = before
   end

+    private
+
+    def supports_auto_compact?
+      return true unless defined?(Etc::SC_PAGE_SIZE)
+
+      begin
+        return GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE] % Etc.sysconf(Etc::SC_PAGE_SIZE) == 0
+      rescue NotImplementedError
+      rescue ArgumentError
+      end
+
+      true
+    end
+  end
+
+  def os_page_size
+    return true unless defined?(Etc::SC_PAGE_SIZE)
+  end
+
   def test_gc_compact_stats
     list = []
