* gc.c: don't embed struct heaps_slot in a heap block because it
  can cause copy-on-write of the memory page of the heap block when
  its free_next is rewritten.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@34247 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
nari 2012-01-09 11:58:12 +00:00
Parent afb5eef4e2
Commit b78bddee35
2 changed files with 36 additions and 13 deletions
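
For readers of the diff below, a minimal sketch of the layout change, in C. The struct names mirror the diff; the alignment constant and the slot_of() helper are illustrative assumptions, not code from gc.c.

    #include <stdint.h>

    #define HEAP_ALIGN      0x4000                 /* assumed block alignment */
    #define HEAP_ALIGN_MASK (HEAP_ALIGN - 1)

    /* Before this commit the whole heaps_slot struct sat at the start of the
     * aligned heap block, so rewriting slot->free_next dirtied (and thus
     * copy-on-write-duplicated) a page otherwise shared across fork().
     * After it, only this small, write-once header remains in the block: */
    struct heaps_slot;                             /* malloc'ed separately */
    struct heaps_header {
        struct heaps_slot *base;                   /* back-pointer to the slot */
        uintptr_t *bits;                           /* mark bitmap */
    };

    /* An object pointer still reaches its bookkeeping in O(1): mask the low
     * address bits to find the block start, then follow the header.
     * (Hypothetical helper; gc.c does this with the GET_HEAP_* macros.) */
    static inline struct heaps_slot *
    slot_of(void *obj)
    {
        uintptr_t base = (uintptr_t)obj & ~(uintptr_t)HEAP_ALIGN_MASK;
        return ((struct heaps_header *)base)->base;
    }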

ChangeLog

@@ -1,3 +1,9 @@
+Mon Jan  9 20:55:34 2012  Narihiro Nakamura  <authornari@gmail.com>
+
+	* gc.c: don't embed struct heaps_slot in a heap block because it
+	  can cause copy-on-write of the memory page of the heap block
+	  when its free_next is rewritten.
+
 Mon Jan  9 20:26:33 2012  Tanaka Akira  <akr@fsij.org>
 
 	* ext/pathname/pathname.c (path_entries): add document suggested by

gc.c (43 changed lines)

@@ -313,6 +313,7 @@ typedef struct RVALUE {
 #endif
 
 struct heaps_slot {
+    void *membase;
     RVALUE *slot;
     size_t limit;
     uintptr_t *bits;
@@ -322,6 +323,11 @@ struct heaps_slot {
     struct heaps_slot *free_next;
 };
 
+struct heaps_header {
+    struct heaps_slot *base;
+    uintptr_t *bits;
+};
+
 struct sorted_heaps_slot {
     RVALUE *start;
     RVALUE *end;
@@ -429,7 +435,7 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
 #define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
 
-#define HEAP_SLOT(p) ((struct heaps_slot *)(p))
+#define HEAP_HEADER(p) ((struct heaps_header *)(p))
 
 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
@@ -522,7 +528,8 @@ rb_objspace_free(rb_objspace_t *objspace)
	size_t i;
	for (i = 0; i < heaps_used; ++i) {
	    free(objspace->heap.sorted[i].slot->bits);
-	    aligned_free(objspace->heap.sorted[i].slot);
+	    aligned_free(objspace->heap.sorted[i].slot->membase);
+	    free(objspace->heap.sorted[i].slot);
	}
	free(objspace->heap.sorted);
	heaps_used = 0;
@@ -542,8 +549,9 @@ rb_objspace_free(rb_objspace_t *objspace)
 #define HEAP_OBJ_LIMIT (HEAP_SIZE/(unsigned int)sizeof(struct RVALUE) - (unsigned int)(sizeof(struct heaps_slot)/sizeof(struct RVALUE)+1))
 #define HEAP_BITMAP_LIMIT (HEAP_OBJ_LIMIT/sizeof(uintptr_t)+1)
 
-#define GET_HEAP_SLOT(x) (HEAP_SLOT(((uintptr_t)x) & ~(HEAP_ALIGN_MASK)))
-#define GET_HEAP_BITMAP(x) (GET_HEAP_SLOT(x)->bits)
+#define GET_HEAP_HEADER(x) (HEAP_HEADER(((uintptr_t)x) & ~(HEAP_ALIGN_MASK)))
+#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
+#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
 #define NUM_IN_SLOT(p) (((uintptr_t)p & HEAP_ALIGN_MASK)/sizeof(RVALUE))
 #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8))
 #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1))
@@ -1120,7 +1128,12 @@ assign_heap_slot(rb_objspace_t *objspace)
	during_gc = 0;
	rb_memerror();
     }
-    slot = (struct heaps_slot *)p;
+    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
+    if (slot == 0) {
+	aligned_free(p);
+	during_gc = 0;
+	rb_memerror();
+    }
     MEMZERO((void*)slot, struct heaps_slot, 1);
 
     slot->next = heaps;
@@ -1128,7 +1141,7 @@ assign_heap_slot(rb_objspace_t *objspace)
     heaps = slot;
 
     membase = p;
-    p = (RVALUE*)((VALUE)p + sizeof(struct heaps_slot));
+    p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
     if ((VALUE)p % sizeof(RVALUE) != 0) {
	p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
	if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
@@ -1141,7 +1154,7 @@ assign_heap_slot(rb_objspace_t *objspace)
     while (lo < hi) {
	register RVALUE *mid_membase;
	mid = (lo + hi) / 2;
-	mid_membase = (void *)objspace->heap.sorted[mid].slot;
+	mid_membase = objspace->heap.sorted[mid].slot->membase;
	if (mid_membase < membase) {
	    lo = mid + 1;
	}
@@ -1158,11 +1171,14 @@ assign_heap_slot(rb_objspace_t *objspace)
     objspace->heap.sorted[hi].slot = slot;
     objspace->heap.sorted[hi].start = p;
     objspace->heap.sorted[hi].end = (p + objs);
+    heaps->membase = membase;
     heaps->slot = p;
     heaps->limit = objs;
     assert(objspace->heap.free_bitmap != NULL);
     heaps->bits = (uintptr_t *)objspace->heap.free_bitmap;
     objspace->heap.free_bitmap = objspace->heap.free_bitmap->next;
+    HEAP_HEADER(membase)->base = heaps;
+    HEAP_HEADER(membase)->bits = heaps->bits;
     memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
     objspace->heap.free_num += objs;
     pend = p + objs;
@@ -2117,11 +2133,12 @@ free_unused_heaps(rb_objspace_t *objspace)
		    objspace->heap.free_bitmap;
		objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits;
		if (!last) {
-		    last = (RVALUE *)objspace->heap.sorted[i].slot;
+		    last = objspace->heap.sorted[i].slot->membase;
		}
		else {
-		    aligned_free(objspace->heap.sorted[i].slot);
+		    aligned_free(objspace->heap.sorted[i].slot->membase);
		}
+		free(objspace->heap.sorted[i].slot);
		heaps_used--;
	    }
	    else {
@@ -2204,7 +2221,7 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
     if (free_num > 0) {
	link_free_heap_slot(objspace, sweep_slot);
     }
-    else if (sweep_slot->free_next != NULL) {
+    else {
	sweep_slot->free_next = NULL;
     }
     objspace->heap.free_num += free_num;
@@ -2789,13 +2806,13 @@ objspace_each_objects(VALUE arg)
     i = 0;
     while (i < heaps_used) {
-	while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot)
+	while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
	    i--;
-	while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot <= (uintptr_t)membase)
+	while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
	    i++;
	if (heaps_used <= i)
	    break;
-	membase = (RVALUE *)objspace->heap.sorted[i].slot;
+	membase = objspace->heap.sorted[i].slot->membase;
	pstart = objspace->heap.sorted[i].slot->slot;
	pend = pstart + objspace->heap.sorted[i].slot->limit;
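
The payoff is visible in the slot_sweep hunk above: relinking the lazy-sweep free list now writes only to the separately malloc'ed heaps_slot, never into the aligned heap block. A minimal sketch of that write pattern, assuming a simple singly linked free list (link_free_heap_slot's body is not part of this diff, so the helper below is an assumed stand-in, not the gc.c implementation):

    /* Only the fields needed for the sketch; the real struct has more. */
    struct heaps_slot {
        void *membase;                   /* the aligned heap block it owns */
        struct heaps_slot *free_next;    /* lazy-sweep free list */
    };

    static struct heaps_slot *free_slots;    /* hypothetical list head */

    static void
    link_free_slot(struct heaps_slot *slot)
    {
        /* Dirties only malloc'ed memory; the pages of slot->membase that
         * are shared after fork() stay clean, so no copy-on-write fires. */
        slot->free_next = free_slots;
        free_slots = slot;
    }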