8330694: Rename 'HeapRegion' to 'G1HeapRegion'

Reviewed-by: cjplummer, kbarrett, tschatzl
Lei Zaakjyu 2024-05-25 02:10:05 +00:00, committed by Kim Barrett
Parent 05f13e75ee
Commit 985b9ce79a
123 changed files with 999 additions and 1002 deletions

View file

@ -207,7 +207,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// Does store cross heap regions?
__ eor(tmp1, store_addr, new_val);
__ lsr(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
__ cbz(tmp1, done);
// crosses regions, storing null?

View file

@ -207,7 +207,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// Does store cross heap regions?
__ eor(tmp1, store_addr, new_val);
__ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
__ movs(tmp1, AsmOperand(tmp1, lsr, G1HeapRegion::LogOfHRGrainBytes));
__ b(done, eq);
// crosses regions, storing null?

View file

@ -243,7 +243,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
// Does store cross heap regions?
__ xorr(tmp1, store_addr, new_val);
__ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ srdi_(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
__ beq(CCR0, filtered);
// Crosses regions, storing null?

View file

@ -194,7 +194,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// Does store cross heap regions?
__ xorr(tmp1, store_addr, new_val);
__ srli(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ srli(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
__ beqz(tmp1, done);
// crosses regions, storing null?

View file

@ -282,7 +282,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ z_lgr(Rtmp1, Rstore_addr);
__ z_xgr(Rtmp1, Rnew_val);
}
__ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
__ z_srag(Rtmp1, Rtmp1, G1HeapRegion::LogOfHRGrainBytes);
__ z_bre(filtered);
// Crosses regions, storing null?

View file

@ -286,7 +286,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ movptr(tmp, store_addr);
__ xorptr(tmp, new_val);
__ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
__ shrptr(tmp, G1HeapRegion::LogOfHRGrainBytes);
__ jcc(Assembler::equal, done);
// crosses regions, storing null?

View file

@ -89,7 +89,7 @@ void ArchiveHeapWriter::init() {
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
guarantee(UseG1GC, "implementation limitation");
guarantee(MIN_GC_REGION_ALIGNMENT <= /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
}
}
@ -439,7 +439,7 @@ void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
if (UseCompressedOops) {
_requested_bottom = align_down(heap_end - heap_region_byte_size, HeapRegion::GrainBytes);
_requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
} else {
// We always write the objects as if the heap started at this address. This
// makes the contents of the archive heap deterministic.
@ -449,7 +449,7 @@ void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
_requested_bottom = (address)NOCOOPS_REQUESTED_BASE;
}
assert(is_aligned(_requested_bottom, HeapRegion::GrainBytes), "sanity");
assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
_requested_top = _requested_bottom + _buffer_used;

View file

@ -2064,8 +2064,8 @@ bool FileMapInfo::can_use_heap_region() {
archive_narrow_klass_shift);
log_info(cds)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
log_info(cds)("The current max heap size = " SIZE_FORMAT "M, HeapRegion::GrainBytes = " SIZE_FORMAT,
MaxHeapSize/M, HeapRegion::GrainBytes);
log_info(cds)("The current max heap size = " SIZE_FORMAT "M, G1HeapRegion::GrainBytes = " SIZE_FORMAT,
MaxHeapSize/M, G1HeapRegion::GrainBytes);
log_info(cds)(" narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
log_info(cds)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
@ -2130,7 +2130,7 @@ bool FileMapInfo::map_heap_region() {
#ifdef ASSERT
// The "old" regions must be parsable -- we cannot have any unused space
// at the start of the lowest G1 region that contains archived objects.
assert(is_aligned(_mapped_heap_memregion.start(), HeapRegion::GrainBytes), "must be");
assert(is_aligned(_mapped_heap_memregion.start(), G1HeapRegion::GrainBytes), "must be");
// Make sure we map at the very top of the heap - see comments in
// init_heap_region_relocation().
@ -2140,7 +2140,7 @@ bool FileMapInfo::map_heap_region() {
address heap_end = (address)heap_range.end();
address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
assert(heap_end >= mapped_heap_region_end, "must be");
assert(heap_end - mapped_heap_region_end < (intx)(HeapRegion::GrainBytes),
assert(heap_end - mapped_heap_region_end < (intx)(G1HeapRegion::GrainBytes),
"must be at the top of the heap to avoid fragmentation");
#endif

View file

@ -158,13 +158,13 @@ void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_v
__ logical_xor(xor_res, new_val, xor_res);
__ move(xor_res, xor_shift_res);
__ unsigned_shift_right(xor_shift_res,
LIR_OprFact::intConst(checked_cast<jint>(HeapRegion::LogOfHRGrainBytes)),
LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
xor_shift_res,
LIR_Opr::illegalOpr());
} else {
__ logical_xor(addr, new_val, xor_res);
__ unsigned_shift_right(xor_res,
LIR_OprFact::intConst(checked_cast<jint>(HeapRegion::LogOfHRGrainBytes)),
LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
xor_shift_res,
LIR_Opr::illegalOpr());
}

View file

@ -453,8 +453,8 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit,
// Should be able to do an unsigned compare of region_size instead of
// and extra shift. Do we have an unsigned compare??
// Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(checked_cast<jint>(HeapRegion::LogOfHRGrainBytes)));
// Node* region_size = __ ConI(1 << G1HeapRegion::LogOfHRGrainBytes);
Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));
// if (xor_res == 0) same region so skip
__ if_then(xor_res, BoolTest::ne, zeroX, likely); {

View file

@ -34,9 +34,9 @@
#include "utilities/align.hpp"
G1CollectedHeap* G1AllocRegion::_g1h = nullptr;
HeapRegion* G1AllocRegion::_dummy_region = nullptr;
G1HeapRegion* G1AllocRegion::_dummy_region = nullptr;
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
void G1AllocRegion::setup(G1CollectedHeap* g1h, G1HeapRegion* dummy_region) {
assert(_dummy_region == nullptr, "should be set once");
assert(dummy_region != nullptr, "pre-condition");
assert(dummy_region->free() == 0, "pre-condition");
@ -50,7 +50,7 @@ void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
_dummy_region = dummy_region;
}
size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
size_t G1AllocRegion::fill_up_remaining_space(G1HeapRegion* alloc_region) {
assert(alloc_region != nullptr && alloc_region != _dummy_region,
"pre-condition");
size_t result = 0;
@ -94,7 +94,7 @@ size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
return result;
}
size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
size_t G1AllocRegion::retire_internal(G1HeapRegion* alloc_region, bool fill_up) {
// We never have to check whether the active region is empty or not,
// and potentially free it if it is, given that it's guaranteed that
// it will never be empty.
@ -120,7 +120,7 @@ size_t G1AllocRegion::retire(bool fill_up) {
size_t waste = 0;
trace("retiring");
HeapRegion* alloc_region = _alloc_region;
G1HeapRegion* alloc_region = _alloc_region;
if (alloc_region != _dummy_region) {
waste = retire_internal(alloc_region, fill_up);
reset_alloc_region();
@ -135,7 +135,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size) {
assert_alloc_region(_used_bytes_before == 0, "pre-condition");
trace("attempting region allocation");
HeapRegion* new_alloc_region = allocate_new_region(word_size);
G1HeapRegion* new_alloc_region = allocate_new_region(word_size);
if (new_alloc_region != nullptr) {
new_alloc_region->reset_pre_dummy_top();
// Need to do this before the allocation
@ -166,7 +166,7 @@ void G1AllocRegion::init() {
trace("initialized");
}
void G1AllocRegion::set(HeapRegion* alloc_region) {
void G1AllocRegion::set(G1HeapRegion* alloc_region) {
trace("setting");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
@ -181,7 +181,7 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
trace("set");
}
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
void G1AllocRegion::update_alloc_region(G1HeapRegion* alloc_region) {
trace("update");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
@ -192,9 +192,9 @@ void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
trace("updated");
}
HeapRegion* G1AllocRegion::release() {
G1HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
G1HeapRegion* alloc_region = _alloc_region;
retire(false /* fill_up */);
assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
_alloc_region = nullptr;
@ -257,11 +257,11 @@ G1AllocRegion::G1AllocRegion(const char* name,
_node_index(node_index)
{ }
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size) {
G1HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size) {
return _g1h->new_mutator_alloc_region(word_size, _node_index);
}
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
void MutatorAllocRegion::retire_region(G1HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
@ -272,7 +272,7 @@ void MutatorAllocRegion::init() {
_wasted_bytes = 0;
}
bool MutatorAllocRegion::should_retain(HeapRegion* region) {
bool MutatorAllocRegion::should_retain(G1HeapRegion* region) {
size_t free_bytes = region->free();
if (free_bytes < MinTLABSize) {
return false;
@ -289,7 +289,7 @@ bool MutatorAllocRegion::should_retain(HeapRegion* region) {
size_t MutatorAllocRegion::retire(bool fill_up) {
size_t waste = 0;
trace("retiring");
HeapRegion* current_region = get();
G1HeapRegion* current_region = get();
if (current_region != nullptr) {
// Retain the current region if it fits a TLAB and has more
// free than the currently retained region.
@ -312,7 +312,7 @@ size_t MutatorAllocRegion::retire(bool fill_up) {
size_t MutatorAllocRegion::used_in_alloc_regions() {
size_t used = 0;
HeapRegion* hr = get();
G1HeapRegion* hr = get();
if (hr != nullptr) {
used += hr->used();
}
@ -324,8 +324,8 @@ size_t MutatorAllocRegion::used_in_alloc_regions() {
return used;
}
HeapRegion* MutatorAllocRegion::release() {
HeapRegion* ret = G1AllocRegion::release();
G1HeapRegion* MutatorAllocRegion::release() {
G1HeapRegion* ret = G1AllocRegion::release();
// The retained alloc region must be retired and this must be
// done after the above call to release the mutator alloc region,
@ -338,21 +338,21 @@ HeapRegion* MutatorAllocRegion::release() {
count(),
byte_size_in_proper_unit(_wasted_bytes),
proper_unit_for_byte_size(_wasted_bytes),
percent_of(_wasted_bytes, count() * HeapRegion::GrainBytes));
percent_of(_wasted_bytes, count() * G1HeapRegion::GrainBytes));
return ret;
}
HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size) {
G1HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size) {
return _g1h->new_gc_alloc_region(word_size, _purpose, _node_index);
}
void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
void G1GCAllocRegion::retire_region(G1HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}
size_t G1GCAllocRegion::retire(bool fill_up) {
HeapRegion* retired = get();
G1HeapRegion* retired = get();
size_t end_waste = G1AllocRegion::retire(fill_up);
// Do not count retirement of the dummy allocation region.
if (retired != nullptr) {

View file

@ -52,7 +52,7 @@ private:
// then _alloc_region is null and this object should not be used to
// satisfy allocation requests (it was done this way to force the
// correct use of init() and release()).
HeapRegion* volatile _alloc_region;
G1HeapRegion* volatile _alloc_region;
// It keeps track of the distinct number of regions that are used
// for allocation in the active interval of this object, i.e.,
@ -76,11 +76,11 @@ private:
// == end()). When we don't have a valid active region we make
// _alloc_region point to this. This allows us to skip checking
// whether the _alloc_region is null or not.
static HeapRegion* _dummy_region;
static G1HeapRegion* _dummy_region;
// After a region is allocated by alloc_new_region, this
// method is used to set it as the active alloc_region
void update_alloc_region(HeapRegion* alloc_region);
void update_alloc_region(G1HeapRegion* alloc_region);
// Allocate a new active region and use it to perform a word_size
// allocation.
@ -98,17 +98,17 @@ protected:
void reset_alloc_region();
// Perform a non-MT-safe allocation out of the given region.
inline HeapWord* allocate(HeapRegion* alloc_region,
inline HeapWord* allocate(G1HeapRegion* alloc_region,
size_t word_size);
// Perform a MT-safe allocation out of the given region.
inline HeapWord* par_allocate(HeapRegion* alloc_region,
inline HeapWord* par_allocate(G1HeapRegion* alloc_region,
size_t word_size);
// Perform a MT-safe allocation out of the given region, with the given
// minimum and desired size. Returns the actual size allocated (between
// minimum and desired size) in actual_word_size if the allocation has been
// successful.
inline HeapWord* par_allocate(HeapRegion* alloc_region,
inline HeapWord* par_allocate(G1HeapRegion* alloc_region,
size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
@ -117,7 +117,7 @@ protected:
// so that no one else can allocate out of it any more.
// Returns the number of bytes that have been wasted by filled up
// the space.
size_t fill_up_remaining_space(HeapRegion* alloc_region);
size_t fill_up_remaining_space(G1HeapRegion* alloc_region);
// Retire the active allocating region. If fill_up is true then make
// sure that the region is full before we retire it so that no one
@ -125,22 +125,22 @@ protected:
// Returns the number of bytes that have been filled up during retire.
virtual size_t retire(bool fill_up);
size_t retire_internal(HeapRegion* alloc_region, bool fill_up);
size_t retire_internal(G1HeapRegion* alloc_region, bool fill_up);
// For convenience as subclasses use it.
static G1CollectedHeap* _g1h;
virtual HeapRegion* allocate_new_region(size_t word_size) = 0;
virtual void retire_region(HeapRegion* alloc_region,
virtual G1HeapRegion* allocate_new_region(size_t word_size) = 0;
virtual void retire_region(G1HeapRegion* alloc_region,
size_t allocated_bytes) = 0;
G1AllocRegion(const char* name, bool bot_updates, uint node_index);
public:
static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);
static void setup(G1CollectedHeap* g1h, G1HeapRegion* dummy_region);
HeapRegion* get() const {
HeapRegion * hr = _alloc_region;
G1HeapRegion* get() const {
G1HeapRegion * hr = _alloc_region;
// Make sure that the dummy region does not escape this class.
return (hr == _dummy_region) ? nullptr : hr;
}
@ -177,11 +177,11 @@ public:
// region. (Use Example: we try to retain the last old GC alloc
// region that we've used during a GC and we can use set() to
// re-instate it at the beginning of the next GC.)
void set(HeapRegion* alloc_region);
void set(G1HeapRegion* alloc_region);
// Should be called when we want to release the active region which
// is returned after it's been retired.
virtual HeapRegion* release();
virtual G1HeapRegion* release();
void trace(const char* str,
size_t min_word_size = 0,
@ -199,15 +199,16 @@ private:
// Retained allocation region. Used to lower the waste generated
// during mutation by having two active regions if the free space
// in a region about to be retired still could fit a TLAB.
HeapRegion* volatile _retained_alloc_region;
G1HeapRegion* volatile _retained_alloc_region;
// Decide if the region should be retained, based on the free size
// in it and the free size in the currently retained region, if any.
bool should_retain(HeapRegion* region);
bool should_retain(G1HeapRegion* region);
protected:
virtual HeapRegion* allocate_new_region(size_t word_size);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
virtual G1HeapRegion* allocate_new_region(size_t word_size);
virtual void retire_region(G1HeapRegion* alloc_region, size_t allocated_bytes);
virtual size_t retire(bool fill_up);
public:
MutatorAllocRegion(uint node_index)
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */, node_index),
@ -230,7 +231,7 @@ public:
// This specialization of release() makes sure that the retained alloc
// region is retired and set to null.
virtual HeapRegion* release();
virtual G1HeapRegion* release();
virtual void init();
};
@ -241,8 +242,8 @@ protected:
G1EvacStats* _stats;
G1HeapRegionAttr::region_type_t _purpose;
virtual HeapRegion* allocate_new_region(size_t word_size);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
virtual G1HeapRegion* allocate_new_region(size_t word_size);
virtual void retire_region(G1HeapRegion* alloc_region, size_t allocated_bytes);
virtual size_t retire(bool fill_up);

View file

@ -41,19 +41,19 @@ inline void G1AllocRegion::reset_alloc_region() {
_alloc_region = _dummy_region;
}
inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
inline HeapWord* G1AllocRegion::allocate(G1HeapRegion* alloc_region,
size_t word_size) {
assert(alloc_region != nullptr, "pre-condition");
return alloc_region->allocate(word_size);
}
inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region, size_t word_size) {
inline HeapWord* G1AllocRegion::par_allocate(G1HeapRegion* alloc_region, size_t word_size) {
size_t temp;
return par_allocate(alloc_region, word_size, word_size, &temp);
}
inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
inline HeapWord* G1AllocRegion::par_allocate(G1HeapRegion* alloc_region,
size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
@ -66,7 +66,7 @@ inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
inline HeapWord* G1AllocRegion::attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
HeapRegion* alloc_region = _alloc_region;
G1HeapRegion* alloc_region = _alloc_region;
assert_alloc_region(alloc_region != nullptr, "not initialized properly");
HeapWord* result = par_allocate(alloc_region, min_word_size, desired_word_size, actual_word_size);

View file

@ -89,14 +89,14 @@ void G1Allocator::release_mutator_alloc_regions() {
}
}
bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
bool G1Allocator::is_retained_old_region(G1HeapRegion* hr) {
return _retained_old_gc_alloc_region == hr;
}
void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
G1HeapRegion** retained_old) {
G1HeapRegion* retained_region = *retained_old;
*retained_old = nullptr;
// We will discard the current GC alloc region if:
@ -190,7 +190,7 @@ size_t G1Allocator::unsafe_max_tlab_alloc() {
// humongous objects.
uint node_index = current_node_index();
HeapRegion* hr = mutator_alloc_region(node_index)->get();
G1HeapRegion* hr = mutator_alloc_region(node_index)->get();
size_t max_tlab = _g1h->max_tlab_size() * wordSize;
if (hr == nullptr || hr->free() < MinTLABSize) {

View file

@ -60,7 +60,7 @@ private:
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
HeapRegion* _retained_old_gc_alloc_region;
G1HeapRegion* _retained_old_gc_alloc_region;
bool survivor_is_full() const;
bool old_is_full() const;
@ -70,7 +70,7 @@ private:
void reuse_retained_old_region(G1EvacInfo* evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
G1HeapRegion** retained);
// Accessors to the allocation regions.
inline MutatorAllocRegion* mutator_alloc_region(uint node_index);
@ -108,7 +108,7 @@ public:
void init_gc_alloc_regions(G1EvacInfo* evacuation_info);
void release_gc_alloc_regions(G1EvacInfo* evacuation_info);
void abandon_gc_alloc_regions();
bool is_retained_old_region(HeapRegion* hr);
bool is_retained_old_region(G1HeapRegion* hr);
// Allocate blocks of memory during mutator time.

View file

@ -55,9 +55,9 @@ void G1Arguments::initialize_alignments() {
// There is a circular dependency here. We base the region size on the heap
// size, but the heap size should be aligned with the region size. To get
// around this we use the unaligned values for the heap.
HeapRegion::setup_heap_region_size(MaxHeapSize);
G1HeapRegion::setup_heap_region_size(MaxHeapSize);
SpaceAlignment = HeapRegion::GrainBytes;
SpaceAlignment = G1HeapRegion::GrainBytes;
HeapAlignment = calculate_heap_alignment(SpaceAlignment);
// We need to initialize card set configuration as soon as heap region size is
@ -71,7 +71,7 @@ void G1Arguments::initialize_alignments() {
}
size_t G1Arguments::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
return G1HeapRegion::max_region_size();
}
void G1Arguments::initialize_verification_types() {
@ -130,22 +130,22 @@ void G1Arguments::initialize_mark_stack_size() {
void G1Arguments::initialize_card_set_configuration() {
assert(HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
assert(G1HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
// Array of Cards card set container globals.
const uint LOG_M = 20;
assert(log2i_exact(HeapRegionBounds::min_size()) == LOG_M, "inv");
assert(HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
uint region_size_log_mb = HeapRegion::LogOfHRGrainBytes - LOG_M;
assert(G1HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
uint region_size_log_mb = G1HeapRegion::LogOfHRGrainBytes - LOG_M;
if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) {
uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(HeapRegion::LogCardsPerRegion);
uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion);
FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2,
G1RemSetArrayOfCardsEntriesBase << region_size_log_mb));
}
// Howl card set container globals.
if (FLAG_IS_DEFAULT(G1RemSetHowlNumBuckets)) {
FLAG_SET_ERGO(G1RemSetHowlNumBuckets, G1CardSetHowl::num_buckets(HeapRegion::CardsPerRegion,
FLAG_SET_ERGO(G1RemSetHowlNumBuckets, G1CardSetHowl::num_buckets(G1HeapRegion::CardsPerRegion,
G1RemSetArrayOfCardsEntries,
G1RemSetHowlMaxNumBuckets));
}

View file

@ -112,7 +112,7 @@ void G1BarrierSet::write_region(JavaThread* thread, MemRegion mr) {
// skip young gen cards
if (*byte == G1CardTable::g1_young_card_val()) {
// MemRegion should not span multiple regions for the young gen.
DEBUG_ONLY(HeapRegion* containing_hr = G1CollectedHeap::heap()->heap_region_containing(mr.start());)
DEBUG_ONLY(G1HeapRegion* containing_hr = G1CollectedHeap::heap()->heap_region_containing(mr.start());)
assert(containing_hr->is_young(), "it should be young");
assert(containing_hr->is_in(mr.start()), "it should contain start");
assert(containing_hr->is_in(mr.last()), "it should also contain last");

View file

@ -57,24 +57,24 @@ static uint default_log2_card_regions_per_region() {
uint log2_card_regions_per_heap_region = 0;
const uint card_container_limit = G1CardSetContainer::LogCardsPerRegionLimit;
if (card_container_limit < (uint)HeapRegion::LogCardsPerRegion) {
log2_card_regions_per_heap_region = (uint)HeapRegion::LogCardsPerRegion - card_container_limit;
if (card_container_limit < (uint)G1HeapRegion::LogCardsPerRegion) {
log2_card_regions_per_heap_region = (uint)G1HeapRegion::LogCardsPerRegion - card_container_limit;
}
return log2_card_regions_per_heap_region;
}
G1CardSetConfiguration::G1CardSetConfiguration() :
G1CardSetConfiguration(HeapRegion::LogCardsPerRegion - default_log2_card_regions_per_region(), /* inline_ptr_bits_per_card */
G1CardSetConfiguration(G1HeapRegion::LogCardsPerRegion - default_log2_card_regions_per_region(), /* inline_ptr_bits_per_card */
G1RemSetArrayOfCardsEntries, /* max_cards_in_array */
(double)G1RemSetCoarsenHowlBitmapToHowlFullPercent / 100, /* cards_in_bitmap_threshold_percent */
G1RemSetHowlNumBuckets, /* num_buckets_in_howl */
(double)G1RemSetCoarsenHowlToFullPercent / 100, /* cards_in_howl_threshold_percent */
(uint)HeapRegion::CardsPerRegion >> default_log2_card_regions_per_region(),
(uint)G1HeapRegion::CardsPerRegion >> default_log2_card_regions_per_region(),
/* max_cards_in_card_set */
default_log2_card_regions_per_region()) /* log2_card_regions_per_region */
{
assert((_log2_card_regions_per_heap_region + _log2_cards_per_card_region) == (uint)HeapRegion::LogCardsPerRegion,
assert((_log2_card_regions_per_heap_region + _log2_cards_per_card_region) == (uint)G1HeapRegion::LogCardsPerRegion,
"inconsistent heap region virtualization setup");
}
@ -395,7 +395,7 @@ G1CardSet::~G1CardSet() {
void G1CardSet::initialize(MemRegion reserved) {
const uint BitsInUint = sizeof(uint) * BitsPerByte;
const uint CardBitsWithinCardRegion = MIN2((uint)HeapRegion::LogCardsPerRegion, G1CardSetContainer::LogCardsPerRegionLimit);
const uint CardBitsWithinCardRegion = MIN2((uint)G1HeapRegion::LogCardsPerRegion, G1CardSetContainer::LogCardsPerRegionLimit);
// Check if the number of cards within a region fits an uint.
if (CardBitsWithinCardRegion > BitsInUint) {

View file

@ -43,7 +43,7 @@ void G1CardTable::verify_g1_young_region(MemRegion mr) {
void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
// Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * G1HeapRegion::GrainWords);
_card_table->clear_MemRegion(mr);
}

View file

@ -31,7 +31,7 @@
inline uint G1CardTable::region_idx_for(CardValue* p) {
size_t const card_idx = pointer_delta(p, _byte_map, sizeof(CardValue));
return (uint)(card_idx >> HeapRegion::LogCardsPerRegion);
return (uint)(card_idx >> G1HeapRegion::LogCardsPerRegion);
}
inline bool G1CardTable::mark_clean_as_dirty(CardValue* card) {

View file

@ -298,7 +298,7 @@ class CleanCallback : public StackObj {
NONCOPYABLE(CleanCallback); // can not copy, _blobs will point to old copy
class PointsIntoHRDetectionClosure : public OopClosure {
HeapRegion* _hr;
G1HeapRegion* _hr;
template <typename T>
void do_oop_work(T* p) {
@ -309,7 +309,7 @@ class CleanCallback : public StackObj {
public:
bool _points_into;
PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
PointsIntoHRDetectionClosure(G1HeapRegion* hr) : _hr(hr), _points_into(false) {}
void do_oop(narrowOop* o) { do_oop_work(o); }
@ -320,7 +320,7 @@ class CleanCallback : public StackObj {
NMethodToOopClosure _nmethod_cl;
public:
CleanCallback(HeapRegion* hr) : _detector(hr), _nmethod_cl(&_detector, !NMethodToOopClosure::FixRelocations) {}
CleanCallback(G1HeapRegion* hr) : _detector(hr), _nmethod_cl(&_detector, !NMethodToOopClosure::FixRelocations) {}
bool operator()(nmethod** value) {
_detector._points_into = false;
@ -329,7 +329,7 @@ class CleanCallback : public StackObj {
}
};
void G1CodeRootSet::clean(HeapRegion* owner) {
void G1CodeRootSet::clean(G1HeapRegion* owner) {
assert(!_is_iterating, "should not mutate while iterating the table");
CleanCallback eval(owner);

View file

@ -29,7 +29,7 @@
#include "utilities/globalDefinitions.hpp"
class G1CodeRootSetHashTable;
class HeapRegion;
class G1HeapRegion;
class nmethod;
// Implements storage for a set of code roots.
@ -53,7 +53,7 @@ class G1CodeRootSet {
void nmethods_do(NMethodClosure* blk) const;
// Remove all nmethods which no longer contain pointers into our "owner" region.
void clean(HeapRegion* owner);
void clean(G1HeapRegion* owner);
bool is_empty() { return length() == 0;}

View file

@ -145,7 +145,7 @@ void G1CollectedHeap::run_batch_task(G1BatchedTask* cl) {
}
uint G1CollectedHeap::get_chunks_per_region() {
uint log_region_size = HeapRegion::LogOfHRGrainBytes;
uint log_region_size = G1HeapRegion::LogOfHRGrainBytes;
// Limit the expected input values to current known possible values of the
// (log) region size. Adjust as necessary after testing if changing the permissible
// values for region size.
@ -154,22 +154,22 @@ uint G1CollectedHeap::get_chunks_per_region() {
return 1u << (log_region_size / 2 - 4);
}
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot(), mr, &_card_set_config);
G1HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new G1HeapRegion(hrs_index, bot(), mr, &_card_set_config);
}
// Private methods.
HeapRegion* G1CollectedHeap::new_region(size_t word_size,
HeapRegionType type,
bool do_expand,
uint node_index) {
assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
G1HeapRegion* G1CollectedHeap::new_region(size_t word_size,
HeapRegionType type,
bool do_expand,
uint node_index) {
assert(!is_humongous(word_size) || word_size <= G1HeapRegion::GrainWords,
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");
HeapRegion* res = _hrm.allocate_free_region(type, node_index);
G1HeapRegion* res = _hrm.allocate_free_region(type, node_index);
if (res == nullptr && do_expand) {
// Currently, only attempts to allocate GC alloc regions set
@ -180,7 +180,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size,
log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
assert(word_size * HeapWordSize < G1HeapRegion::GrainBytes,
"This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
word_size * HeapWordSize);
if (expand_single_region(node_index)) {
@ -194,14 +194,14 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size,
return res;
}
void G1CollectedHeap::set_humongous_metadata(HeapRegion* first_hr,
void G1CollectedHeap::set_humongous_metadata(G1HeapRegion* first_hr,
uint num_regions,
size_t word_size,
bool update_remsets) {
// Calculate the new top of the humongous object.
HeapWord* obj_top = first_hr->bottom() + word_size;
// The word size sum of all the regions used
size_t word_size_sum = num_regions * HeapRegion::GrainWords;
size_t word_size_sum = num_regions * G1HeapRegion::GrainWords;
assert(word_size <= word_size_sum, "sanity");
// How many words memory we "waste" which cannot hold a filler object.
@ -236,7 +236,7 @@ void G1CollectedHeap::set_humongous_metadata(HeapRegion* first_hr,
uint first = first_hr->hrm_index();
uint last = first + num_regions - 1;
HeapRegion* hr = nullptr;
G1HeapRegion* hr = nullptr;
for (uint i = first + 1; i <= last; ++i) {
hr = region_at(i);
hr->hr_clear(false /* clear_space */);
@ -277,12 +277,12 @@ void G1CollectedHeap::set_humongous_metadata(HeapRegion* first_hr,
}
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
G1CollectedHeap::humongous_obj_allocate_initialize_regions(G1HeapRegion* first_hr,
uint num_regions,
size_t word_size) {
assert(first_hr != nullptr, "pre-condition");
assert(is_humongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
assert(num_regions * G1HeapRegion::GrainWords >= word_size, "pre-condition");
// Index of last region in the series.
uint first = first_hr->hrm_index();
@ -318,13 +318,13 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
// Next, update the metadata for the regions.
set_humongous_metadata(first_hr, num_regions, word_size, true);
HeapRegion* last_hr = region_at(last);
G1HeapRegion* last_hr = region_at(last);
size_t used = byte_size(first_hr->bottom(), last_hr->top());
increase_used(used);
for (uint i = first; i <= last; ++i) {
HeapRegion *hr = region_at(i);
G1HeapRegion *hr = region_at(i);
_humongous_set.add(hr);
G1HeapRegionPrinter::alloc(hr);
}
@ -334,7 +334,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
return align_up(word_size, G1HeapRegion::GrainWords) / G1HeapRegion::GrainWords;
}
// If could fit into free regions w/o expansion, try.
@ -348,7 +348,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
// Policy: First try to allocate a humongous object in the free list.
HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions);
G1HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions);
if (humongous_start == nullptr) {
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
@ -473,12 +473,12 @@ template <typename Func>
void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func) {
// Mark each G1 region touched by the range as old, add it to
// the old set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(range.start());
HeapRegion* end_region = _hrm.addr_to_region(range.last());
G1HeapRegion* curr_region = _hrm.addr_to_region(range.start());
G1HeapRegion* end_region = _hrm.addr_to_region(range.last());
while (curr_region != nullptr) {
bool is_last = curr_region == end_region;
HeapRegion* next_region = is_last ? nullptr : _hrm.next_region_in_heap(curr_region);
G1HeapRegion* next_region = is_last ? nullptr : _hrm.next_region_in_heap(curr_region);
func(curr_region, is_last);
@ -504,7 +504,7 @@ HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size, HeapWord* pref
size_t commits = 0;
// Attempt to allocate towards the end of the heap.
HeapWord* start_addr = reserved.end() - align_up(word_size, HeapRegion::GrainWords);
HeapWord* start_addr = reserved.end() - align_up(word_size, G1HeapRegion::GrainWords);
MemRegion range = MemRegion(start_addr, word_size);
HeapWord* last_address = range.last();
if (!_hrm.allocate_containing_regions(range, &commits, workers())) {
@ -513,12 +513,12 @@ HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size, HeapWord* pref
increase_used(word_size * HeapWordSize);
if (commits != 0) {
log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * commits);
G1HeapRegion::GrainWords * HeapWordSize * commits);
}
// Mark each G1 region touched by the range as old, add it to
// the old set, and set top.
auto set_region_to_old = [&] (HeapRegion* r, bool is_last) {
auto set_region_to_old = [&] (G1HeapRegion* r, bool is_last) {
assert(r->is_empty(), "Region already in use (%u)", r->hrm_index());
HeapWord* top = is_last ? last_address + 1 : r->end();
@ -537,7 +537,7 @@ void G1CollectedHeap::populate_archive_regions_bot(MemRegion range) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
iterate_regions_in_range(range,
[&] (HeapRegion* r, bool is_last) {
[&] (G1HeapRegion* r, bool is_last) {
r->update_bot();
});
}
@ -559,7 +559,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion range) {
size_used += range.byte_size();
// Free, empty and uncommit regions with CDS archive content.
auto dealloc_archive_region = [&] (HeapRegion* r, bool is_last) {
auto dealloc_archive_region = [&] (G1HeapRegion* r, bool is_last) {
guarantee(r->is_old(), "Expected old region at index %u", r->hrm_index());
_old_set.remove(r);
r->set_free();
@ -572,7 +572,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion range) {
if (shrink_count != 0) {
log_debug(gc, ergo, heap)("Attempt heap shrinking (CDS archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * shrink_count);
G1HeapRegion::GrainWords * HeapWordSize * shrink_count);
// Explicit uncommit.
uncommit_regions(shrink_count);
}
@ -650,7 +650,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
result = humongous_obj_allocate(word_size);
if (result != nullptr) {
policy()->old_gen_alloc_tracker()->
add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
add_allocated_humongous_bytes_since_last_gc(size_in_regions * G1HeapRegion::GrainBytes);
return result;
}
@ -666,7 +666,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
if (result != nullptr) {
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
policy()->old_gen_alloc_tracker()->
record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
record_collection_pause_humongous_allocation(size_in_regions * G1HeapRegion::GrainBytes);
}
return result;
}
@ -712,7 +712,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
class PostCompactionPrinterClosure: public HeapRegionClosure {
public:
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
assert(!hr->is_young(), "not expecting to find young regions");
G1HeapRegionPrinter::post_compaction(hr);
return false;
@ -991,8 +991,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers, double* expand_time_ms) {
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
aligned_expand_bytes = align_up(aligned_expand_bytes,
HeapRegion::GrainBytes);
aligned_expand_bytes = align_up(aligned_expand_bytes, G1HeapRegion::GrainBytes);
log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
expand_bytes, aligned_expand_bytes);
@ -1003,7 +1002,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_worker
}
double expand_heap_start_time_sec = os::elapsedTime();
uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
uint regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes);
assert(regions_to_expand > 0, "Must expand by at least one region");
uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
@ -1013,7 +1012,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_worker
assert(expanded_by > 0, "must have failed during commit.");
size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
size_t actual_expand_bytes = expanded_by * G1HeapRegion::GrainBytes;
assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
policy()->record_new_heap_size(num_regions());
@ -1036,12 +1035,11 @@ bool G1CollectedHeap::expand_single_region(uint node_index) {
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
size_t aligned_shrink_bytes =
ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_down(aligned_shrink_bytes,
HeapRegion::GrainBytes);
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);
uint num_regions_to_remove = (uint)(shrink_bytes / G1HeapRegion::GrainBytes);
uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
size_t shrunk_bytes = num_regions_removed * G1HeapRegion::GrainBytes;
log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B actual amount shrunk: " SIZE_FORMAT "B",
shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
@ -1096,7 +1094,7 @@ public:
guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
}
}
bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }
bool is_correct_type(G1HeapRegion* hr) { return hr->is_old(); }
const char* get_description() { return "Old Regions"; }
};
@ -1120,7 +1118,7 @@ public:
"master humongous set MT safety protocol outside a safepoint");
}
}
bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
bool is_correct_type(G1HeapRegion* hr) { return hr->is_humongous(); }
const char* get_description() { return "Humongous Regions"; }
};
@ -1179,13 +1177,13 @@ G1CollectedHeap::G1CollectedHeap() :
_heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
_humongous_object_threshold_in_words = humongous_threshold_for(G1HeapRegion::GrainWords);
// Since filler arrays are never referenced, we can make them region sized.
// This simplifies filling up the region in case we have some potentially
// unreferenced (by Java code, but still in use by native code) pinned objects
// in there.
_filler_array_max_size = HeapRegion::GrainWords;
_filler_array_max_size = G1HeapRegion::GrainWords;
// Override the default _stack_chunk_max_size so that no humongous stack chunks are created
_stack_chunk_max_size = _humongous_object_threshold_in_words;
@ -1214,7 +1212,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
G1RegionToSpaceMapper::create_mapper(rs,
size,
page_size,
HeapRegion::GrainBytes,
G1HeapRegion::GrainBytes,
translation_factor,
mtGC);
@ -1260,8 +1258,8 @@ jint G1CollectedHeap::initialize() {
size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(init_byte_size, G1HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, G1HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");
// Reserve the maximum.
@ -1270,7 +1268,7 @@ jint G1CollectedHeap::initialize() {
// is calculated by subtracting the requested size from the
// 32Gb boundary and using the result as the base address for
// heap reservation. If the requested size is not aligned to
// HeapRegion::GrainBytes (i.e. the alignment that is passed
// G1HeapRegion::GrainBytes (i.e. the alignment that is passed
// into the ReservedHeapSpace constructor) then the actual
// base of the reserved heap may end up differing from the
// address that was requested (i.e. the preferred heap base).
@ -1302,7 +1300,7 @@ jint G1CollectedHeap::initialize() {
G1RegionToSpaceMapper::create_mapper(heap_rs,
heap_rs.size(),
page_size,
HeapRegion::GrainBytes,
G1HeapRegion::GrainBytes,
1,
mtJavaHeap);
if(heap_storage == nullptr) {
@ -1350,8 +1348,8 @@ jint G1CollectedHeap::initialize() {
_rem_set->initialize(max_reserved_regions());
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
guarantee(G1HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
guarantee(G1HeapRegion::CardsPerRegion < max_cards_per_region,
"too many cards per region");
HeapRegionRemSet::initialize(_reserved);
@ -1361,7 +1359,7 @@ jint G1CollectedHeap::initialize() {
_bot = new G1BlockOffsetTable(reserved(), bot_storage);
{
size_t granularity = HeapRegion::GrainBytes;
size_t granularity = G1HeapRegion::GrainBytes;
_region_attr.initialize(reserved(), granularity);
}
@ -1372,7 +1370,7 @@ jint G1CollectedHeap::initialize() {
}
_workers->initialize_workers();
_numa->set_region_info(HeapRegion::GrainBytes, page_size);
_numa->set_region_info(G1HeapRegion::GrainBytes, page_size);
// Create the G1ConcurrentMark data structure and thread.
// (Must do this late, so that "max_[reserved_]regions" is defined.)
@ -1405,9 +1403,9 @@ jint G1CollectedHeap::initialize() {
_free_arena_memory_task = new G1MonotonicArenaFreeMemoryTask("Card Set Free Memory Task");
_service_thread->register_task(_free_arena_memory_task);
// Here we allocate the dummy HeapRegion that is required by the
// Here we allocate the dummy G1HeapRegion that is required by the
// G1AllocRegion class.
HeapRegion* dummy_region = _hrm.get_dummy_region();
G1HeapRegion* dummy_region = _hrm.get_dummy_region();
// We'll re-use the same region whether the alloc region will
// require BOT updates or not and, if it doesn't, then a non-young
@ -1521,7 +1519,7 @@ void G1CollectedHeap::ref_processing_init() {
}
size_t G1CollectedHeap::capacity() const {
return _hrm.length() * HeapRegion::GrainBytes;
return _hrm.length() * G1HeapRegion::GrainBytes;
}
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
@ -1542,7 +1540,7 @@ class SumUsedClosure: public HeapRegionClosure {
size_t _used;
public:
SumUsedClosure() : _used(0) {}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
_used += r->used();
return false;
}
@ -1887,13 +1885,13 @@ bool G1CollectedHeap::is_in(const void* p) const {
// Iteration functions.
// Iterates an ObjectClosure over all objects within a HeapRegion.
// Iterates an ObjectClosure over all objects within a G1HeapRegion.
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
ObjectClosure* _cl;
public:
IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
if (!r->is_continues_humongous()) {
r->object_iterate(_cl);
}
@ -1986,7 +1984,7 @@ void G1CollectedHeap::par_iterate_regions_array(HeapRegionClosure* cl,
do {
uint region_idx = regions[cur_pos];
if (hr_claimer == nullptr || hr_claimer->claim_region(region_idx)) {
HeapRegion* r = region_at(region_idx);
G1HeapRegion* r = region_at(region_idx);
bool result = cl->do_heap_region(r);
guarantee(!result, "Must not cancel iteration");
}
@ -1999,9 +1997,9 @@ void G1CollectedHeap::par_iterate_regions_array(HeapRegionClosure* cl,
}
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
HeapRegion* hr = heap_region_containing(addr);
G1HeapRegion* hr = heap_region_containing(addr);
// The CollectedHeap API requires us to not fail for any given address within
// the heap. HeapRegion::block_start() has been optimized to not accept addresses
// the heap. G1HeapRegion::block_start() has been optimized to not accept addresses
// outside of the allocated area.
if (addr >= hr->top()) {
return nullptr;
@ -2010,16 +2008,16 @@ HeapWord* G1CollectedHeap::block_start(const void* addr) const {
}
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
HeapRegion* hr = heap_region_containing(addr);
G1HeapRegion* hr = heap_region_containing(addr);
return hr->block_is_obj(addr, hr->parsable_bottom_acquire());
}
size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
return (_policy->young_list_target_length() - _survivor.length()) * G1HeapRegion::GrainBytes;
}
size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
return _eden.length() * HeapRegion::GrainBytes;
return _eden.length() * G1HeapRegion::GrainBytes;
}
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
@ -2033,7 +2031,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
}
size_t G1CollectedHeap::max_capacity() const {
return max_regions() * HeapRegion::GrainBytes;
return max_regions() * G1HeapRegion::GrainBytes;
}
void G1CollectedHeap::prepare_for_verify() {
@ -2052,14 +2050,14 @@ class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
PrintRegionClosure(outputStream* st) : _st(st) {}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
r->print_on(_st);
return false;
}
};
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const G1HeapRegion* hr,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption::G1UseConcMarking: return is_obj_dead(obj, hr);
@ -2096,13 +2094,13 @@ void G1CollectedHeap::print_on(outputStream* st) const {
p2i(_hrm.reserved().start()),
p2i(_hrm.reserved().end()));
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
st->print(" region size " SIZE_FORMAT "K, ", G1HeapRegion::GrainBytes / K);
uint young_regions = young_regions_count();
st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
(size_t) young_regions * HeapRegion::GrainBytes / K);
(size_t) young_regions * G1HeapRegion::GrainBytes / K);
uint survivor_regions = survivor_regions_count();
st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
(size_t) survivor_regions * HeapRegion::GrainBytes / K);
(size_t) survivor_regions * G1HeapRegion::GrainBytes / K);
st->cr();
if (_numa->is_enabled()) {
uint num_nodes = _numa->num_active_nodes();
@ -2169,7 +2167,7 @@ G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
size_t eden_capacity_bytes =
(policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
(policy()->young_list_target_length() * G1HeapRegion::GrainBytes) - survivor_used_bytes;
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return G1HeapSummary(heap_summary, heap_used, eden_used_bytes, eden_capacity_bytes,
@ -2280,7 +2278,7 @@ void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_m
CGC_lock->notify();
}
bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
bool G1CollectedHeap::is_potential_eager_reclaim_candidate(G1HeapRegion* r) const {
// We don't nominate objects with many remembered set entries, on
// the assumption that such objects are likely still live.
HeapRegionRemSet* rem_set = r->rem_set();
@ -2292,7 +2290,7 @@ bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const
void G1CollectedHeap::verify_region_attr_remset_is_tracked() {
class VerifyRegionAttrRemSet : public HeapRegionClosure {
public:
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
bool const remset_is_tracked = g1h->region_attr(r->bottom()).remset_is_tracked();
assert(r->rem_set()->is_tracked() == remset_is_tracked,
@ -2545,7 +2543,7 @@ class G1BulkUnregisterNMethodTask : public WorkerTask {
class UnregisterNMethodsHeapRegionClosure : public HeapRegionClosure {
public:
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
hr->rem_set()->bulk_remove_code_roots();
return false;
}
@ -2612,11 +2610,11 @@ void G1CollectedHeap::record_obj_copy_mem_stats() {
create_g1_evac_summary(&_old_evac_stats));
}
void G1CollectedHeap::clear_bitmap_for_region(HeapRegion* hr) {
void G1CollectedHeap::clear_bitmap_for_region(G1HeapRegion* hr) {
concurrent_mark()->clear_bitmap_for_region(hr);
}
void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
void G1CollectedHeap::free_region(G1HeapRegion* hr, FreeRegionList* free_list) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
@ -2632,12 +2630,12 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
}
}
void G1CollectedHeap::retain_region(HeapRegion* hr) {
void G1CollectedHeap::retain_region(G1HeapRegion* hr) {
MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
collection_set()->candidates()->add_retained_region_unsorted(hr);
}
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
void G1CollectedHeap::free_humongous_region(G1HeapRegion* hr,
FreeRegionList* free_list) {
assert(hr->is_humongous(), "this is only for humongous regions");
hr->clear_humongous();
@ -2682,7 +2680,7 @@ void G1CollectedHeap::rebuild_free_region_list() {
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
G1CollectedHeap::heap()->clear_region_attr(r);
r->clear_young_index_in_cset();
@ -2698,11 +2696,11 @@ void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
collection_set->stop_incremental_building();
}
bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
bool G1CollectedHeap::is_old_gc_alloc_region(G1HeapRegion* hr) {
return _allocator->is_retained_old_region(hr);
}
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
void G1CollectedHeap::set_region_short_lived_locked(G1HeapRegion* hr) {
_eden.add(hr);
_policy->set_region_eden(hr);
}
@ -2714,7 +2712,7 @@ private:
bool _success;
public:
NoYoungRegionsClosure() : _success(true) { }
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
if (r->is_young()) {
log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
p2i(r->bottom()), p2i(r->end()));
@ -2737,8 +2735,8 @@ bool G1CollectedHeap::check_young_list_empty() {
#endif // ASSERT
// Remove the given HeapRegion from the appropriate region set.
void G1CollectedHeap::prepare_region_for_full_compaction(HeapRegion* hr) {
// Remove the given G1HeapRegion from the appropriate region set.
void G1CollectedHeap::prepare_region_for_full_compaction(G1HeapRegion* hr) {
if (hr->is_humongous()) {
_humongous_set.remove(hr);
} else if (hr->is_old()) {
@ -2795,7 +2793,7 @@ public:
}
}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
if (r->is_empty()) {
assert(r->rem_set()->is_empty(), "Empty regions should have empty remembered sets.");
// Add free regions to the free list
@ -2845,15 +2843,15 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
// Methods for the mutator alloc region
HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
G1HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
uint node_index) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
bool should_allocate = policy()->should_allocate_mutator_region();
if (should_allocate) {
HeapRegion* new_alloc_region = new_region(word_size,
HeapRegionType::Eden,
false /* do_expand */,
node_index);
G1HeapRegion* new_alloc_region = new_region(word_size,
HeapRegionType::Eden,
false /* do_expand */,
node_index);
if (new_alloc_region != nullptr) {
set_region_short_lived_locked(new_alloc_region);
G1HeapRegionPrinter::alloc(new_alloc_region);
@ -2864,7 +2862,7 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
return nullptr;
}
void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
void G1CollectedHeap::retire_mutator_alloc_region(G1HeapRegion* alloc_region,
size_t allocated_bytes) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
@ -2890,7 +2888,7 @@ bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
}
}
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index) {
G1HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
if (!has_more_regions(dest)) {
@ -2904,10 +2902,10 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
type = HeapRegionType::Old;
}
HeapRegion* new_alloc_region = new_region(word_size,
type,
true /* do_expand */,
node_index);
G1HeapRegion* new_alloc_region = new_region(word_size,
type,
true /* do_expand */,
node_index);
if (new_alloc_region != nullptr) {
if (type.is_survivor()) {
@ -2925,7 +2923,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
return nullptr;
}
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
void G1CollectedHeap::retire_gc_alloc_region(G1HeapRegion* alloc_region,
size_t allocated_bytes,
G1HeapRegionAttr dest) {
_bytes_used_during_gc += allocated_bytes;
@ -2943,14 +2941,14 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
G1HeapRegionPrinter::retire(alloc_region);
}
HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
G1HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
bool expanded = false;
uint index = _hrm.find_highest_free(&expanded);
if (index != G1_NO_HRM_INDEX) {
if (expanded) {
log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize);
G1HeapRegion::GrainWords * HeapWordSize);
}
return _hrm.allocate_free_regions_starting_at(index, 1);
}
@ -2976,7 +2974,7 @@ public:
oop heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
G1HeapRegion* hr = _g1h->heap_region_containing(obj);
assert(!hr->is_continues_humongous(),
"trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT,
@ -3045,7 +3043,7 @@ GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
}
void G1CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
HeapRegion* region = heap_region_containing(start);
G1HeapRegion* region = heap_region_containing(start);
region->fill_with_dummy_object(start, pointer_delta(end, start), zap);
}


@ -79,7 +79,7 @@ class G1RemSet;
class G1ServiceTask;
class G1ServiceThread;
class GCMemoryManager;
class HeapRegion;
class G1HeapRegion;
class MemoryPool;
class nmethod;
class ReferenceProcessor;
@ -196,7 +196,7 @@ public:
// Start a new incremental collection set for the next pause.
void start_new_collection_set();
void prepare_region_for_full_compaction(HeapRegion* hr);
void prepare_region_for_full_compaction(G1HeapRegion* hr);
private:
// Rebuilds the region sets / lists so that they are repopulated to
@ -382,20 +382,20 @@ private:
G1CollectionSet _collection_set;
// Try to allocate a single non-humongous HeapRegion sufficient for
// Try to allocate a single non-humongous G1HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request. 'type' takes the type of region to be allocated. (Use constants
// Old, Eden, Humongous, Survivor defined in HeapRegionType.)
HeapRegion* new_region(size_t word_size,
HeapRegionType type,
bool do_expand,
uint node_index = G1NUMA::AnyNodeIndex);
G1HeapRegion* new_region(size_t word_size,
HeapRegionType type,
bool do_expand,
uint node_index = G1NUMA::AnyNodeIndex);
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
HeapWord* humongous_obj_allocate_initialize_regions(G1HeapRegion* first_hr,
uint num_regions,
size_t word_size);
@ -465,14 +465,14 @@ private:
// These methods are the "callbacks" from the G1AllocRegion class.
// For mutator alloc regions.
HeapRegion* new_mutator_alloc_region(size_t word_size, uint node_index);
void retire_mutator_alloc_region(HeapRegion* alloc_region,
G1HeapRegion* new_mutator_alloc_region(size_t word_size, uint node_index);
void retire_mutator_alloc_region(G1HeapRegion* alloc_region,
size_t allocated_bytes);
// For GC alloc regions.
bool has_more_regions(G1HeapRegionAttr dest);
HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
void retire_gc_alloc_region(HeapRegion* alloc_region,
G1HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
void retire_gc_alloc_region(G1HeapRegion* alloc_region,
size_t allocated_bytes, G1HeapRegionAttr dest);
// - if clear_all_soft_refs is true, all soft references should be
@ -572,7 +572,7 @@ public:
// Expand the garbage-first heap by at least the given size (in bytes!).
// Returns true if the heap was expanded by the requested amount;
// false otherwise.
// (Rounds up to a HeapRegion boundary.)
// (Rounds up to a G1HeapRegion boundary.)
bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = nullptr, double* expand_time_ms = nullptr);
bool expand_single_region(uint node_index);
@ -593,7 +593,7 @@ public:
void gc_epilogue(bool full);
// Does the given region fulfill remembered set based eager reclaim candidate requirements?
bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
bool is_potential_eager_reclaim_candidate(G1HeapRegion* r) const;
inline bool is_humongous_reclaim_candidate(uint region);
@ -604,22 +604,22 @@ public:
// Register the given region to be part of the collection set.
inline void register_humongous_candidate_region_with_region_attr(uint index);
void set_humongous_metadata(HeapRegion* first_hr,
void set_humongous_metadata(G1HeapRegion* first_hr,
uint num_regions,
size_t word_size,
bool update_remsets);
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
void register_young_region_with_region_attr(HeapRegion* r) {
void register_young_region_with_region_attr(G1HeapRegion* r) {
_region_attr.set_in_young(r->hrm_index(), r->has_pinned_objects());
}
inline void register_new_survivor_region_with_region_attr(HeapRegion* r);
inline void register_region_with_region_attr(HeapRegion* r);
inline void register_old_region_with_region_attr(HeapRegion* r);
inline void register_optional_region_with_region_attr(HeapRegion* r);
inline void register_new_survivor_region_with_region_attr(G1HeapRegion* r);
inline void register_region_with_region_attr(G1HeapRegion* r);
inline void register_old_region_with_region_attr(G1HeapRegion* r);
inline void register_optional_region_with_region_attr(G1HeapRegion* r);
void clear_region_attr(const HeapRegion* hr) {
void clear_region_attr(const G1HeapRegion* hr) {
_region_attr.clear(hr);
}
@ -631,7 +631,7 @@ public:
// for all regions.
void verify_region_attr_remset_is_tracked() PRODUCT_RETURN;
void clear_bitmap_for_region(HeapRegion* hr);
void clear_bitmap_for_region(G1HeapRegion* hr);
bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
@ -667,11 +667,11 @@ public:
}
// Allocates a new heap region instance.
HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
G1HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
// Allocate the highest free region in the reserved heap. This will commit
// regions as necessary.
HeapRegion* alloc_highest_free_region();
G1HeapRegion* alloc_highest_free_region();
// Frees a region by resetting its metadata and adding it to the free list
// passed as a parameter (this is usually a local list which will be appended
@ -679,10 +679,10 @@ public:
// in another way).
// Callers must ensure they are the only one calling free on the given region
// at the same time.
void free_region(HeapRegion* hr, FreeRegionList* free_list);
void free_region(G1HeapRegion* hr, FreeRegionList* free_list);
// Add the given region to the retained regions collection set candidates.
void retain_region(HeapRegion* hr);
void retain_region(G1HeapRegion* hr);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
@ -696,10 +696,10 @@ public:
// list later).
// The method assumes that only a single thread is ever calling
// this for a particular region at once.
void free_humongous_region(HeapRegion* hr,
void free_humongous_region(G1HeapRegion* hr,
FreeRegionList* free_list);
// Execute func(HeapRegion* r, bool is_last) on every region covered by the
// Execute func(G1HeapRegion* r, bool is_last) on every region covered by the
// given range.
template <typename Func>
void iterate_regions_in_range(MemRegion range, const Func& func);
@ -725,7 +725,7 @@ public:
private:
// Shrink the garbage-first heap by at most the given size (in bytes!).
// (Rounds down to a HeapRegion boundary.)
// (Rounds down to a G1HeapRegion boundary.)
void shrink(size_t shrink_bytes);
void shrink_helper(size_t expand_bytes);
@ -918,7 +918,7 @@ public:
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }
inline bool is_collection_set_candidate(const HeapRegion* r) const;
inline bool is_collection_set_candidate(const G1HeapRegion* r) const;
void initialize_serviceability() override;
MemoryUsage memory_usage() override;
@ -993,21 +993,21 @@ public:
uint num_used_regions() const { return num_regions() - num_free_regions(); }
#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
bool is_on_master_free_list(G1HeapRegion* hr) {
return _hrm.is_free(hr);
}
#endif // ASSERT
inline void old_set_add(HeapRegion* hr);
inline void old_set_remove(HeapRegion* hr);
inline void old_set_add(G1HeapRegion* hr);
inline void old_set_remove(G1HeapRegion* hr);
size_t non_young_capacity_bytes() {
return (old_regions_count() + humongous_regions_count()) * HeapRegion::GrainBytes;
return (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes;
}
// Determine whether the given region is one that we are using as an
// old GC alloc region.
bool is_old_gc_alloc_region(HeapRegion* hr);
bool is_old_gc_alloc_region(G1HeapRegion* hr);
// Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the
@ -1029,7 +1029,7 @@ public:
// Return "TRUE" iff the given object address is within the collection
// set. Assumes that the reference points into the heap.
inline bool is_in_cset(const HeapRegion* hr) const;
inline bool is_in_cset(const G1HeapRegion* hr) const;
inline bool is_in_cset(oop obj) const;
inline bool is_in_cset(HeapWord* addr) const;
@ -1076,13 +1076,13 @@ public:
void heap_region_iterate(HeapRegionIndexClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* region_at(uint index) const;
inline HeapRegion* region_at_or_null(uint index) const;
inline G1HeapRegion* region_at(uint index) const;
inline G1HeapRegion* region_at_or_null(uint index) const;
// Iterate over the regions that the humongous object starting at the given
// region and apply the given method with the signature f(HeapRegion*) on them.
// region and apply the given method with the signature f(G1HeapRegion*) on them.
template <typename Func>
void humongous_obj_regions_iterate(HeapRegion* start, const Func& f);
void humongous_obj_regions_iterate(G1HeapRegion* start, const Func& f);
// Calculate the region index of the given address. Given address must be
// within the heap.
@ -1130,12 +1130,12 @@ public:
size_t length,
uint worker_id) const;
// Returns the HeapRegion that contains addr. addr must not be null.
inline HeapRegion* heap_region_containing(const void* addr) const;
// Returns the G1HeapRegion that contains addr. addr must not be null.
inline G1HeapRegion* heap_region_containing(const void* addr) const;
// Returns the HeapRegion that contains addr, or null if that is an uncommitted
// Returns the G1HeapRegion that contains addr, or null if that is an uncommitted
// region. addr must not be null.
inline HeapRegion* heap_region_containing_or_null(const void* addr) const;
inline G1HeapRegion* heap_region_containing_or_null(const void* addr) const;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
// each address in the (reserved) heap is a member of exactly
@ -1197,7 +1197,7 @@ public:
return named_heap<G1CollectedHeap>(CollectedHeap::G1);
}
void set_region_short_lived_locked(HeapRegion* hr);
void set_region_short_lived_locked(G1HeapRegion* hr);
// add appropriate methods for any other surv rate groups
G1SurvivorRegions* survivor() { return &_survivor; }
@ -1221,7 +1221,7 @@ public:
inline static bool is_obj_filler(const oop obj);
// Determine if an object is dead, given the object and also
// the region to which the object belongs.
inline bool is_obj_dead(const oop obj, const HeapRegion* hr) const;
inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;
// Determine if an object is dead, given only the object itself.
// This will find the region to which the object belongs and
@ -1229,7 +1229,7 @@ public:
// If obj is null it is not dead.
inline bool is_obj_dead(const oop obj) const;
inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
inline bool is_obj_dead_full(const oop obj, const G1HeapRegion* hr) const;
inline bool is_obj_dead_full(const oop obj) const;
// Mark the live object that failed evacuation in the bitmap.
@ -1286,7 +1286,7 @@ public:
// are the same as those above.
bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const G1HeapRegion* hr,
const VerifyOption vo) const;
bool is_obj_dead_cond(const oop obj,


@ -103,17 +103,17 @@ inline size_t G1CollectedHeap::clamp_plab_size(size_t value) const {
// Inline functions for G1CollectedHeap
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
inline G1HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
// Return the region with the given index, or null if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
inline G1HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
template <typename Func>
inline void G1CollectedHeap::humongous_obj_regions_iterate(HeapRegion* start, const Func& f) {
inline void G1CollectedHeap::humongous_obj_regions_iterate(G1HeapRegion* start, const Func& f) {
assert(start->is_starts_humongous(), "must be");
do {
HeapRegion* next = _hrm.next_region_in_humongous(start);
G1HeapRegion* next = _hrm.next_region_in_humongous(start);
f(start);
start = next;
} while (start != nullptr);
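
The loop above reads the successor region before applying f, so f is free to retire or reclaim the region it is handed. A minimal standalone sketch of that iterate-then-advance pattern — not part of this patch, with purely illustrative types:

    #include <cstdio>

    struct Node { Node* next; int id; };

    template <typename Func>
    void iterate_chain(Node* start, const Func& f) {
      while (start != nullptr) {
        Node* next = start->next; // capture the successor first: f may invalidate 'start'
        f(start);
        start = next;
      }
    }

    int main() {
      Node c{nullptr, 2}, b{&c, 1}, a{&b, 0};
      iterate_chain(&a, [](Node* n) { printf("visit %d\n", n->id); });
      return 0;
    }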
@ -123,29 +123,29 @@ inline uint G1CollectedHeap::addr_to_region(const void* addr) const {
assert(is_in_reserved(addr),
"Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> G1HeapRegion::LogOfHRGrainBytes);
}
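
A worked illustration of the shift above — a minimal standalone sketch, not part of this patch, assuming a hypothetical 4 MiB region size and reserved base:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t reserved_start     = 0x0000700000000000; // assumed reserved heap base
      const unsigned  log_of_grain_bytes = 22;                 // log2(4 MiB), assumed region size

      uintptr_t addr = reserved_start + ((uintptr_t)7 << log_of_grain_bytes) + 128;

      // Byte offset from the reserved base, shifted right by the region-size log,
      // yields the region index -- the same computation addr_to_region() performs.
      unsigned region_idx = (unsigned)((addr - reserved_start) >> log_of_grain_bytes);
      printf("region index = %u\n", region_idx); // prints 7
      return 0;
    }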
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
return _hrm.reserved().start() + index * HeapRegion::GrainWords;
return _hrm.reserved().start() + index * G1HeapRegion::GrainWords;
}
inline HeapRegion* G1CollectedHeap::heap_region_containing(const void* addr) const {
inline G1HeapRegion* G1CollectedHeap::heap_region_containing(const void* addr) const {
uint const region_idx = addr_to_region(addr);
return region_at(region_idx);
}
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const void* addr) const {
inline G1HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const void* addr) const {
uint const region_idx = addr_to_region(addr);
return region_at_or_null(region_idx);
}
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
inline void G1CollectedHeap::old_set_add(G1HeapRegion* hr) {
_old_set.add(hr);
}
inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
inline void G1CollectedHeap::old_set_remove(G1HeapRegion* hr) {
_old_set.remove(hr);
}
@ -160,7 +160,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
// Assign the containing region to containing_hr so that we don't
// have to keep calling heap_region_containing() in the
// asserts below.
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
DEBUG_ONLY(G1HeapRegion* containing_hr = heap_region_containing(start);)
assert(word_size > 0, "pre-condition");
assert(containing_hr->is_in(start), "it should contain start");
assert(containing_hr->is_young(), "it should be young");
@ -193,7 +193,7 @@ inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) const {
return _region_attr.is_in_cset(addr);
}
bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) const {
bool G1CollectedHeap::is_in_cset(const G1HeapRegion* hr) const {
return _region_attr.is_in_cset(hr);
}
@ -215,23 +215,23 @@ void G1CollectedHeap::register_humongous_candidate_region_with_region_attr(uint
_region_attr.set_humongous_candidate(index);
}
void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion* r) {
void G1CollectedHeap::register_new_survivor_region_with_region_attr(G1HeapRegion* r) {
_region_attr.set_new_survivor_region(r->hrm_index());
}
void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
void G1CollectedHeap::register_region_with_region_attr(G1HeapRegion* r) {
_region_attr.set_remset_is_tracked(r->hrm_index(), r->rem_set()->is_tracked());
_region_attr.set_is_pinned(r->hrm_index(), r->has_pinned_objects());
}
void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
void G1CollectedHeap::register_old_region_with_region_attr(G1HeapRegion* r) {
assert(!r->has_pinned_objects(), "must be");
assert(r->rem_set()->is_complete(), "must be");
_region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
_rem_set->exclude_region_from_scan(r->hrm_index());
}
void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
void G1CollectedHeap::register_optional_region_with_region_attr(G1HeapRegion* r) {
_region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}
@ -252,7 +252,7 @@ inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
}
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) const {
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
if (hr->is_in_parsable_area(obj)) {
// This object is in the parsable part of the heap, live unless scrubbed.
return is_obj_filler(obj);
@ -286,7 +286,7 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
return is_obj_dead(obj, heap_region_containing(obj));
}
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const G1HeapRegion* hr) const {
return !is_marked(obj);
}
@ -311,7 +311,7 @@ inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
}
}
inline bool G1CollectedHeap::is_collection_set_candidate(const HeapRegion* r) const {
inline bool G1CollectedHeap::is_collection_set_candidate(const G1HeapRegion* r) const {
const G1CollectionSetCandidates* candidates = collection_set()->candidates();
return candidates->contains(r);
}


@ -95,7 +95,7 @@ void G1CollectionSet::abandon_all_candidates() {
_optional_old_regions.clear();
}
void G1CollectionSet::add_old_region(HeapRegion* hr) {
void G1CollectionSet::add_old_region(G1HeapRegion* hr) {
assert_at_safepoint_on_vm_thread();
assert(_inc_build_state == Active,
@ -134,7 +134,7 @@ void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
OrderAccess::loadload();
for (uint i = 0; i < len; i++) {
HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
G1HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
bool result = cl->do_heap_region(r);
if (result) {
cl->set_incomplete();
@ -152,7 +152,7 @@ void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
assert_at_safepoint();
for (HeapRegion* r : _optional_old_regions) {
for (G1HeapRegion* r : _optional_old_regions) {
bool result = cl->do_heap_region(r);
guarantee(!result, "Must not cancel iteration");
}
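
Here do_heap_region() returning true is the abort signal, which optional-region iteration forbids. A minimal sketch of that closure contract — not part of this patch, with illustrative names:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Region { int id; };

    struct RegionClosure {
      // Returning true requests that iteration stop early.
      virtual bool do_region(Region* r) = 0;
      virtual ~RegionClosure() = default;
    };

    struct PrintClosure : RegionClosure {
      bool do_region(Region* r) override { printf("region %d\n", r->id); return false; }
    };

    int main() {
      std::vector<Region> regions{{0}, {1}, {2}};
      PrintClosure cl;
      for (Region& r : regions) {
        bool aborted = cl.do_region(&r);
        assert(!aborted && "must not cancel iteration"); // mirrors the guarantee above
      }
      return 0;
    }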
@ -176,7 +176,7 @@ void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
worker_id);
}
void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
void G1CollectionSet::add_young_region_common(G1HeapRegion* hr) {
assert(hr->is_young(), "invariant");
assert(_inc_build_state == Active, "Precondition");
@ -196,12 +196,12 @@ void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
_collection_set_cur_length++;
}
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
void G1CollectionSet::add_survivor_regions(G1HeapRegion* hr) {
assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
add_young_region_common(hr);
}
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
void G1CollectionSet::add_eden_region(G1HeapRegion* hr) {
assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
add_young_region_common(hr);
}
@ -213,7 +213,7 @@ public:
G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
if (!r->has_surv_rate_group()) {
@ -251,7 +251,7 @@ class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
public:
G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
_st->print_cr(" " HR_FORMAT ", TAMS: " PTR_FORMAT " PB: " PTR_FORMAT ", age: %4d",
@ -387,7 +387,7 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
}
void G1CollectionSet::move_candidates_to_collection_set(G1CollectionCandidateRegionList* regions) {
for (HeapRegion* r : *regions) {
for (G1HeapRegion* r : *regions) {
_g1h->clear_region_attr(r);
add_old_region(r);
}
@ -396,7 +396,7 @@ void G1CollectionSet::move_candidates_to_collection_set(G1CollectionCandidateReg
void G1CollectionSet::prepare_optional_regions(G1CollectionCandidateRegionList* regions){
uint cur_index = 0;
for (HeapRegion* r : *regions) {
for (G1HeapRegion* r : *regions) {
assert(r->is_old(), "the region should be old");
assert(!r->in_collection_set(), "should not already be in the CSet");
@ -412,7 +412,7 @@ void G1CollectionSet::move_pinned_marking_to_retained(G1CollectionCandidateRegio
}
candidates()->remove(regions);
for (HeapRegion* r : *regions) {
for (G1HeapRegion* r : *regions) {
assert(r->has_pinned_objects(), "must be pinned");
assert(r->rem_set()->is_complete(), "must be complete");
candidates()->add_retained_region_unsorted(r);
@ -427,7 +427,7 @@ void G1CollectionSet::drop_pinned_retained_regions(G1CollectionCandidateRegionLi
candidates()->remove(regions);
// We can now drop these region's remembered sets.
for (HeapRegion* r : *regions) {
for (G1HeapRegion* r : *regions) {
r->rem_set()->clear(true /* only_cardset */);
}
}
@ -457,7 +457,7 @@ bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_ti
}
void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
for (HeapRegion* r : _optional_old_regions) {
for (G1HeapRegion* r : _optional_old_regions) {
pss->record_unused_optional_region(r);
// Clear collection set marker and make sure that the remembered set information
// is correct as we still need it later.
@ -486,7 +486,7 @@ public:
FREE_C_HEAP_ARRAY(int, _heap_region_indices);
}
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
const uint idx = r->young_index_in_cset();
assert(idx > 0, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());


@ -35,7 +35,7 @@ class G1GCPhaseTimes;
class G1ParScanThreadStateSet;
class G1Policy;
class G1SurvivorRegions;
class HeapRegion;
class G1HeapRegion;
class HeapRegionClaimer;
class HeapRegionClosure;
@ -168,10 +168,10 @@ class G1CollectionSet {
void verify_young_cset_indices() const NOT_DEBUG_RETURN;
// Update the incremental collection set information when adding a region.
void add_young_region_common(HeapRegion* hr);
void add_young_region_common(G1HeapRegion* hr);
// Add the given old region to the head of the current collection set.
void add_old_region(HeapRegion* hr);
void add_old_region(G1HeapRegion* hr);
void move_candidates_to_collection_set(G1CollectionCandidateRegionList* regions);
// Prepares old regions in the given set for optional collection later. Does not
@ -271,10 +271,10 @@ public:
void abandon_optional_collection_set(G1ParScanThreadStateSet* pss);
// Add eden region to the collection set.
void add_eden_region(HeapRegion* hr);
void add_eden_region(G1HeapRegion* hr);
// Add survivor region to the collection set.
void add_survivor_regions(HeapRegion* hr);
void add_survivor_regions(G1HeapRegion* hr);
#ifndef PRODUCT
bool verify_young_ages();


@ -38,7 +38,7 @@ void G1CollectionCandidateList::set(G1CollectionSetCandidateInfo* candidate_info
_candidates.appendAll(&a);
}
void G1CollectionCandidateList::append_unsorted(HeapRegion* r) {
void G1CollectionCandidateList::append_unsorted(G1HeapRegion* r) {
G1CollectionSetCandidateInfo c(r, r->calc_gc_efficiency());
_candidates.append(c);
}
@ -135,7 +135,7 @@ int G1CollectionCandidateList::compare_reclaimble_bytes(G1CollectionSetCandidate
G1CollectionCandidateRegionList::G1CollectionCandidateRegionList() : _regions(2, mtGC) { }
void G1CollectionCandidateRegionList::append(HeapRegion* r) {
void G1CollectionCandidateRegionList::append(G1HeapRegion* r) {
assert(!_regions.contains(r), "must be");
_regions.append(r);
}
@ -144,7 +144,7 @@ void G1CollectionCandidateRegionList::remove_prefix(G1CollectionCandidateRegionL
#ifdef ASSERT
// Check that the given list is a prefix of this list.
int i = 0;
for (HeapRegion* r : *other) {
for (G1HeapRegion* r : *other) {
assert(_regions.at(i) == r, "must be in order, but element %d is not", i);
i++;
}
@ -156,7 +156,7 @@ void G1CollectionCandidateRegionList::remove_prefix(G1CollectionCandidateRegionL
_regions.remove_till(other->length());
}
HeapRegion* G1CollectionCandidateRegionList::at(uint index) {
G1HeapRegion* G1CollectionCandidateRegionList::at(uint index) {
return _regions.at(index);
}
@ -176,7 +176,7 @@ G1CollectionSetCandidates::~G1CollectionSetCandidates() {
FREE_C_HEAP_ARRAY(CandidateOrigin, _contains_map);
}
bool G1CollectionSetCandidates::is_from_marking(HeapRegion* r) const {
bool G1CollectionSetCandidates::is_from_marking(G1HeapRegion* r) const {
assert(contains(r), "must be");
return _contains_map[r->hrm_index()] == CandidateOrigin::Marking;
}
@ -200,7 +200,7 @@ void G1CollectionSetCandidates::clear() {
void G1CollectionSetCandidates::sort_marking_by_efficiency() {
G1CollectionCandidateListIterator iter = _marking_regions.begin();
for (; iter != _marking_regions.end(); ++iter) {
HeapRegion* hr = (*iter)->_r;
G1HeapRegion* hr = (*iter)->_r;
(*iter)->_gc_efficiency = hr->calc_gc_efficiency();
}
_marking_regions.sort_by_efficiency();
@ -216,7 +216,7 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionSetCandi
_marking_regions.set(candidate_infos, num_infos);
for (uint i = 0; i < num_infos; i++) {
HeapRegion* r = candidate_infos[i]._r;
G1HeapRegion* r = candidate_infos[i]._r;
assert(!contains(r), "must not contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Marking;
}
@ -233,7 +233,7 @@ void G1CollectionSetCandidates::sort_by_efficiency() {
_retained_regions.verify();
}
void G1CollectionSetCandidates::add_retained_region_unsorted(HeapRegion* r) {
void G1CollectionSetCandidates::add_retained_region_unsorted(G1HeapRegion* r) {
assert(!contains(r), "must not contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Retained;
_retained_regions.append_unsorted(r);
@ -249,7 +249,7 @@ void G1CollectionSetCandidates::remove(G1CollectionCandidateRegionList* other) {
G1CollectionCandidateRegionList other_marking_regions;
G1CollectionCandidateRegionList other_retained_regions;
for (HeapRegion* r : *other) {
for (G1HeapRegion* r : *other) {
if (is_from_marking(r)) {
other_marking_regions.append(r);
} else {
@ -260,7 +260,7 @@ void G1CollectionSetCandidates::remove(G1CollectionCandidateRegionList* other) {
_marking_regions.remove(&other_marking_regions);
_retained_regions.remove(&other_retained_regions);
for (HeapRegion* r : *other) {
for (G1HeapRegion* r : *other) {
assert(contains(r), "must contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Invalid;
}
@ -289,7 +289,7 @@ void G1CollectionSetCandidates::verify_helper(G1CollectionCandidateList* list, u
list->verify();
for (uint i = 0; i < (uint)list->length(); i++) {
HeapRegion* r = list->at(i)._r;
G1HeapRegion* r = list->at(i)._r;
if (is_from_marking(r)) {
from_marking++;
@ -334,13 +334,13 @@ void G1CollectionSetCandidates::verify() {
}
#endif
bool G1CollectionSetCandidates::contains(const HeapRegion* r) const {
bool G1CollectionSetCandidates::contains(const G1HeapRegion* r) const {
const uint index = r->hrm_index();
assert(index < _max_regions, "must be");
return _contains_map[index] != CandidateOrigin::Invalid;
}
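
Candidate membership is tracked with a per-region origin byte indexed by hrm_index(). A minimal sketch of that scheme — not part of this patch, with assumed sizes and values:

    #include <cstdio>

    enum class Origin : unsigned char { Invalid, Marking, Retained };

    const unsigned max_regions = 8;          // assumed
    Origin contains_map[max_regions] = {};   // all Origin::Invalid initially

    bool contains(unsigned region_index) {
      return contains_map[region_index] != Origin::Invalid;
    }

    int main() {
      contains_map[3] = Origin::Marking;     // region 3 becomes a marking candidate
      contains_map[5] = Origin::Retained;    // region 5 becomes a retained candidate
      printf("3: %d, 4: %d, 5: %d\n", contains(3), contains(4), contains(5)); // 1, 0, 1
      return 0;
    }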
const char* G1CollectionSetCandidates::get_short_type_str(const HeapRegion* r) const {
const char* G1CollectionSetCandidates::get_short_type_str(const G1HeapRegion* r) const {
static const char* type_strings[] = {
"Ci", // Invalid
"Cm", // Marking


@ -35,29 +35,29 @@
class G1CollectionCandidateList;
class G1CollectionSetCandidates;
class HeapRegion;
class G1HeapRegion;
class HeapRegionClosure;
using G1CollectionCandidateRegionListIterator = GrowableArrayIterator<HeapRegion*>;
using G1CollectionCandidateRegionListIterator = GrowableArrayIterator<G1HeapRegion*>;
// A set of HeapRegion*, a thin wrapper around GrowableArray.
// A set of G1HeapRegion*, a thin wrapper around GrowableArray.
class G1CollectionCandidateRegionList {
GrowableArray<HeapRegion*> _regions;
GrowableArray<G1HeapRegion*> _regions;
public:
G1CollectionCandidateRegionList();
// Append a HeapRegion to the end of this list. The region must not be in the list
// Append a G1HeapRegion to the end of this list. The region must not be in the list
// already.
void append(HeapRegion* r);
// Remove the given list of HeapRegion* from this list. The given list must be a prefix
void append(G1HeapRegion* r);
// Remove the given list of G1HeapRegion* from this list. The given list must be a prefix
// of this list.
void remove_prefix(G1CollectionCandidateRegionList* list);
// Empty contents of the list.
void clear();
HeapRegion* at(uint index);
G1HeapRegion* at(uint index);
uint length() const { return (uint)_regions.length(); }
@ -66,12 +66,12 @@ public:
};
struct G1CollectionSetCandidateInfo {
HeapRegion* _r;
G1HeapRegion* _r;
double _gc_efficiency;
uint _num_unreclaimed; // Number of GCs this region has been found unreclaimable.
G1CollectionSetCandidateInfo() : G1CollectionSetCandidateInfo(nullptr, 0.0) { }
G1CollectionSetCandidateInfo(HeapRegion* r, double gc_efficiency) : _r(r), _gc_efficiency(gc_efficiency), _num_unreclaimed(0) { }
G1CollectionSetCandidateInfo(G1HeapRegion* r, double gc_efficiency) : _r(r), _gc_efficiency(gc_efficiency), _num_unreclaimed(0) { }
bool update_num_unreclaimed() {
++_num_unreclaimed;
@ -105,8 +105,8 @@ public:
// Put the given set of candidates into this list, preserving the efficiency ordering.
void set(G1CollectionSetCandidateInfo* candidate_infos, uint num_infos);
// Add the given HeapRegion to this list at the end, (potentially) making the list unsorted.
void append_unsorted(HeapRegion* r);
// Add the given G1HeapRegion to this list at the end, (potentially) making the list unsorted.
void append_unsorted(G1HeapRegion* r);
// Restore sorting order by decreasing gc efficiency, using the existing efficiency
// values.
void sort_by_efficiency();
@ -151,7 +151,7 @@ class G1CollectionSetCandidatesIterator : public StackObj {
G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint position);
G1CollectionSetCandidatesIterator& operator++();
HeapRegion* operator*();
G1HeapRegion* operator*();
bool operator==(const G1CollectionSetCandidatesIterator& rhs);
bool operator!=(const G1CollectionSetCandidatesIterator& rhs);
@ -190,7 +190,7 @@ class G1CollectionSetCandidates : public CHeapObj<mtGC> {
// The number of regions from the last merge of candidates from the marking.
uint _last_marking_candidates_length;
bool is_from_marking(HeapRegion* r) const;
bool is_from_marking(G1HeapRegion* r) const;
public:
G1CollectionSetCandidates();
@ -218,14 +218,14 @@ public:
// Add the given region to the set of retained regions without regards to the
// gc efficiency sorting. The retained regions must be re-sorted manually later.
void add_retained_region_unsorted(HeapRegion* r);
void add_retained_region_unsorted(G1HeapRegion* r);
// Remove the given regions from the candidates. All given regions must be part
// of the candidates.
void remove(G1CollectionCandidateRegionList* other);
bool contains(const HeapRegion* r) const;
bool contains(const G1HeapRegion* r) const;
const char* get_short_type_str(const HeapRegion* r) const;
const char* get_short_type_str(const G1HeapRegion* r) const;
bool is_empty() const;


@ -61,7 +61,7 @@ inline G1CollectionSetCandidatesIterator& G1CollectionSetCandidatesIterator::ope
return *this;
}
inline HeapRegion* G1CollectionSetCandidatesIterator::operator*() {
inline G1HeapRegion* G1CollectionSetCandidatesIterator::operator*() {
uint length = _which->marking_regions_length();
if (_position < length) {
return _which->_marking_regions.at(_position)._r;


@ -91,7 +91,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}
// Set element in array.
void set(uint idx, HeapRegion* hr) {
void set(uint idx, G1HeapRegion* hr) {
assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
assert(_data[idx]._r == nullptr, "Value must not have been set.");
_data[idx] = CandidateInfo(hr, 0.0);
@ -124,7 +124,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
uint _regions_added;
void add_region(HeapRegion* hr) {
void add_region(G1HeapRegion* hr) {
if (_cur_chunk_idx == _cur_chunk_end) {
_array->claim_chunk(_cur_chunk_idx, _cur_chunk_end);
}
@ -143,7 +143,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
_cur_chunk_end(0),
_regions_added(0) { }
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
// Candidates from marking are always old; also keep regions that are already
// collection set candidates (some retained regions) in that list.
if (!r->is_old() || r->is_collection_set_candidate()) {
@ -212,7 +212,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
uint max_to_prune = num_candidates - min_old_cset_length;
while (true) {
HeapRegion* r = data[num_candidates - num_pruned - 1]._r;
G1HeapRegion* r = data[num_candidates - num_pruned - 1]._r;
size_t const reclaimable = r->reclaimable_bytes();
if (num_pruned >= max_to_prune ||
wasted_bytes + reclaimable > allowed_waste) {


@ -40,7 +40,7 @@ class G1CollectionSetChooser : public AllStatic {
public:
static size_t mixed_gc_live_threshold_bytes() {
return HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
return G1HeapRegion::GrainBytes * (size_t)G1MixedGCLiveThresholdPercent / 100;
}
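
A worked sketch of the threshold above — not from the patch, assuming an 8 MiB region size and a G1MixedGCLiveThresholdPercent of 90; both values are illustrative:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t grain_bytes        = 8u * 1024 * 1024; // assumed G1HeapRegion::GrainBytes
      const size_t live_threshold_pct = 90;               // assumed flag value

      // Old regions with more live bytes than this are skipped as mixed-GC candidates.
      size_t threshold = grain_bytes * live_threshold_pct / 100;
      printf("mixed GC live threshold: %zu bytes (~90%% of the region)\n", threshold);
      return 0;
    }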
static bool region_occupancy_low_enough_for_evac(size_t live_bytes) {


@ -580,7 +580,7 @@ void G1ConcurrentMark::reset() {
_root_regions.reset();
}
void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
void G1ConcurrentMark::clear_statistics(G1HeapRegion* r) {
uint region_idx = r->hrm_index();
for (uint j = 0; j < _max_num_tasks; ++j) {
_tasks[j]->clear_mark_stats_cache(region_idx);
@ -589,7 +589,7 @@ void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
_region_mark_stats[region_idx].clear();
}
void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
void G1ConcurrentMark::humongous_object_eagerly_reclaimed(G1HeapRegion* r) {
assert_at_safepoint();
assert(r->is_starts_humongous(), "Got humongous continues region here");
@ -602,7 +602,7 @@ void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
// Clear any statistics about the region gathered so far.
_g1h->humongous_obj_regions_iterate(r,
[&] (HeapRegion* r) {
[&] (G1HeapRegion* r) {
clear_statistics(r);
});
}
@ -697,7 +697,7 @@ private:
return false;
}
HeapWord* region_clear_limit(HeapRegion* r) {
HeapWord* region_clear_limit(G1HeapRegion* r) {
// During a Concurrent Undo Mark cycle, the per region top_at_mark_start and
// live_words data are current wrt to the _mark_bitmap. We use this information
// to only clear ranges of the bitmap that require clearing.
@ -721,7 +721,7 @@ private:
_suspendible(suspendible)
{ }
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
if (has_aborted()) {
return true;
}
@ -783,7 +783,7 @@ public:
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) {
assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
size_t const num_bytes_to_clear = (G1HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
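
A worked sketch of how that clearing work is split into chunks and capped by the active worker count — not part of this patch; region size, region count, map factor and chunk size are all assumed values:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) / alignment * alignment;
    }

    int main() {
      const size_t grain_bytes     = 4u * 1024 * 1024;  // assumed G1HeapRegion::GrainBytes
      const size_t num_regions     = 512;               // assumed committed regions
      const size_t heap_map_factor = 64;                // assumed heap bytes per bitmap byte
      const size_t chunk_size      = 1u * 1024 * 1024;  // assumed clearing chunk size
      const size_t active_workers  = 8;                 // assumed

      size_t bytes_to_clear = grain_bytes * num_regions / heap_map_factor;       // 32 MiB of bitmap
      size_t num_chunks     = align_up(bytes_to_clear, chunk_size) / chunk_size; // 32 chunks
      size_t num_workers    = std::min(num_chunks, active_workers);              // 8 workers
      printf("bitmap bytes=%zu chunks=%zu workers=%zu\n", bytes_to_clear, num_chunks, num_workers);
      return 0;
    }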
@ -869,7 +869,7 @@ class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
NoteStartOfMarkHRClosure() : HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { }
bool do_heap_region(HeapRegion* r) override {
bool do_heap_region(G1HeapRegion* r) override {
if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) {
_cm->update_top_at_mark_start(r);
}
@ -1035,7 +1035,7 @@ uint G1ConcurrentMark::calc_active_marking_workers() {
void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
#ifdef ASSERT
HeapWord* last = region->last();
HeapRegion* hr = _g1h->heap_region_containing(last);
G1HeapRegion* hr = _g1h->heap_region_containing(last);
assert(hr->is_old() || top_at_mark_start(hr) == hr->bottom(),
"Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
assert(top_at_mark_start(hr) == region->start(),
@ -1099,11 +1099,11 @@ bool G1ConcurrentMark::wait_until_root_region_scan_finished() {
return root_regions()->wait_until_scan_finished();
}
void G1ConcurrentMark::add_root_region(HeapRegion* r) {
void G1ConcurrentMark::add_root_region(G1HeapRegion* r) {
root_regions()->add(top_at_mark_start(r), r->top());
}
bool G1ConcurrentMark::is_root_region(HeapRegion* r) {
bool G1ConcurrentMark::is_root_region(G1HeapRegion* r) {
return root_regions()->contains(MemRegion(top_at_mark_start(r), r->top()));
}
@ -1233,11 +1233,11 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
_num_humongous_regions_removed(0),
_local_cleanup_list(local_cleanup_list) {}
void reclaim_empty_humongous_region(HeapRegion* hr) {
void reclaim_empty_humongous_region(G1HeapRegion* hr) {
assert(!hr->has_pinned_objects(), "precondition");
assert(hr->is_starts_humongous(), "precondition");
auto on_humongous_region = [&] (HeapRegion* hr) {
auto on_humongous_region = [&] (G1HeapRegion* hr) {
assert(hr->used() > 0, "precondition");
assert(!hr->has_pinned_objects(), "precondition");
assert(hr->is_humongous(), "precondition");
@ -1254,7 +1254,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
_g1h->humongous_obj_regions_iterate(hr, on_humongous_region);
}
void reclaim_empty_old_region(HeapRegion* hr) {
void reclaim_empty_old_region(G1HeapRegion* hr) {
assert(hr->used() > 0, "precondition");
assert(!hr->has_pinned_objects(), "precondition");
assert(hr->is_old(), "precondition");
@ -1268,7 +1268,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
_g1h->free_region(hr, _local_cleanup_list);
}
bool do_heap_region(HeapRegion* hr) override {
bool do_heap_region(G1HeapRegion* hr) override {
G1RemSetTrackingPolicy* tracker = _g1h->policy()->remset_tracker();
if (hr->is_starts_humongous()) {
// The liveness of this humongous obj decided by either its allocation
@ -1277,7 +1277,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
|| _cm->contains_live_object(hr->hrm_index());
if (is_live) {
const bool selected_for_rebuild = tracker->update_humongous_before_rebuild(hr);
auto on_humongous_region = [&] (HeapRegion* hr) {
auto on_humongous_region = [&] (G1HeapRegion* hr) {
if (selected_for_rebuild) {
_num_selected_for_rebuild++;
}
@ -1360,7 +1360,7 @@ public:
_g1h(g1h) {
}
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
// Update the remset tracking state from updating to complete
// if remembered sets have been rebuilt.
_g1h->policy()->remset_tracker()->update_after_rebuild(r);
@ -1902,24 +1902,24 @@ void G1ConcurrentMark::flush_all_task_caches() {
hits, misses, percent_of(hits, sum));
}
void G1ConcurrentMark::clear_bitmap_for_region(HeapRegion* hr) {
void G1ConcurrentMark::clear_bitmap_for_region(G1HeapRegion* hr) {
assert_at_safepoint();
_mark_bitmap.clear_range(MemRegion(hr->bottom(), hr->end()));
}
HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
// "checkpoint" the finger
HeapWord* finger = _finger;
while (finger < _heap.end()) {
assert(_g1h->is_in_reserved(finger), "invariant");
HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
// Make sure that the reads below do not float before loading curr_region.
OrderAccess::loadload();
// Above heap_region_containing may return null as we always scan claim
// until the end of the heap. In this case, just jump to the next region.
HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + HeapRegion::GrainWords;
HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
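
A minimal sketch of the lock-free finger-claiming idea above, reduced to region indices and std::atomic — not part of this patch, all names illustrative:

    #include <atomic>
    #include <cstdio>

    std::atomic<unsigned> finger{0};   // next unclaimed region index
    const unsigned num_regions = 64;   // assumed

    // Returns a claimed index, or num_regions once every region has been claimed.
    unsigned claim_region() {
      unsigned cur = finger.load();
      while (cur < num_regions) {
        // On success this thread owns region 'cur'; on failure 'cur' is refreshed
        // with the value another thread installed and the claim is retried.
        if (finger.compare_exchange_weak(cur, cur + 1)) {
          return cur;
        }
      }
      return num_regions;
    }

    int main() {
      printf("claimed region %u\n", claim_region());
      return 0;
    }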
@ -1973,7 +1973,7 @@ public:
guarantee(oopDesc::is_oop(task_entry.obj()),
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
p2i(task_entry.obj()), _phase, _info);
HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
G1HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
"obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
p2i(task_entry.obj()), _phase, _info, r->hrm_index());
@ -1998,9 +1998,9 @@ void G1ConcurrentMark::verify_no_collection_set_oops() {
// Verify the global finger
HeapWord* global_finger = finger();
if (global_finger != nullptr && global_finger < _heap.end()) {
// Since we always iterate over all regions, we might get a null HeapRegion
// Since we always iterate over all regions, we might get a null G1HeapRegion
// here.
HeapRegion* global_hr = _g1h->heap_region_containing_or_null(global_finger);
G1HeapRegion* global_hr = _g1h->heap_region_containing_or_null(global_finger);
guarantee(global_hr == nullptr || global_finger == global_hr->bottom(),
"global finger: " PTR_FORMAT " region: " HR_FORMAT,
p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
@ -2013,7 +2013,7 @@ void G1ConcurrentMark::verify_no_collection_set_oops() {
HeapWord* task_finger = task->finger();
if (task_finger != nullptr && task_finger < _heap.end()) {
// See above note on the global finger verification.
HeapRegion* r = _g1h->heap_region_containing_or_null(task_finger);
G1HeapRegion* r = _g1h->heap_region_containing_or_null(task_finger);
guarantee(r == nullptr || task_finger == r->bottom() ||
!r->in_collection_set() || !r->has_index_in_opt_cset(),
"task finger: " PTR_FORMAT " region: " HR_FORMAT,
@ -2140,7 +2140,7 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
_g1h(g1h), _task(task)
{ }
void G1CMTask::setup_for_region(HeapRegion* hr) {
void G1CMTask::setup_for_region(G1HeapRegion* hr) {
assert(hr != nullptr,
"claim_region() should have filtered out null regions");
_curr_region = hr;
@ -2149,7 +2149,7 @@ void G1CMTask::setup_for_region(HeapRegion* hr) {
}
void G1CMTask::update_region_limit() {
HeapRegion* hr = _curr_region;
G1HeapRegion* hr = _curr_region;
HeapWord* bottom = hr->bottom();
HeapWord* limit = _cm->top_at_mark_start(hr);
@ -2741,7 +2741,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
assert(_curr_region == nullptr, "invariant");
assert(_finger == nullptr, "invariant");
assert(_region_limit == nullptr, "invariant");
HeapRegion* claimed_region = _cm->claim_region(_worker_id);
G1HeapRegion* claimed_region = _cm->claim_region(_worker_id);
if (claimed_region != nullptr) {
// Yes, we managed to claim one
setup_for_region(claimed_region);
@ -2996,7 +2996,7 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p
G1PPRL_SUM_ADDR_FORMAT("reserved")
G1PPRL_SUM_BYTE_FORMAT("region-size"),
p2i(reserved.start()), p2i(reserved.end()),
HeapRegion::GrainBytes);
G1HeapRegion::GrainBytes);
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
G1PPRL_TYPE_H_FORMAT
@ -3024,7 +3024,7 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p
"(bytes)", "", "(bytes)");
}
bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
bool G1PrintRegionLivenessInfoClosure::do_heap_region(G1HeapRegion* r) {
if (!log_is_enabled(Trace, gc, liveness)) {
return false;
}


@ -507,7 +507,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// method. So, this way, each task will spend very little time in
// claim_region() and is allowed to call the regular clock method
// frequently.
HeapRegion* claim_region(uint worker_id);
G1HeapRegion* claim_region(uint worker_id);
// Determines whether we've run out of regions to scan. Note that
// the finger can point past the heap end in case the heap was expanded
@ -564,25 +564,25 @@ public:
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
// Update the TAMS for the given region to the current top.
inline void update_top_at_mark_start(HeapRegion* r);
inline void update_top_at_mark_start(G1HeapRegion* r);
// Reset the TAMS for the given region to bottom of that region.
inline void reset_top_at_mark_start(HeapRegion* r);
inline void reset_top_at_mark_start(G1HeapRegion* r);
inline HeapWord* top_at_mark_start(const HeapRegion* r) const;
inline HeapWord* top_at_mark_start(const G1HeapRegion* r) const;
inline HeapWord* top_at_mark_start(uint region) const;
// Returns whether the given object been allocated since marking start (i.e. >= TAMS in that region).
inline bool obj_allocated_since_mark_start(oop obj) const;
// Sets the internal top_at_region_start for the given region to current top of the region.
inline void update_top_at_rebuild_start(HeapRegion* r);
inline void update_top_at_rebuild_start(G1HeapRegion* r);
// TARS for the given region during remembered set rebuilding.
inline HeapWord* top_at_rebuild_start(HeapRegion* r) const;
inline HeapWord* top_at_rebuild_start(G1HeapRegion* r) const;
// Clear statistics gathered during the concurrent cycle for the given region after
// it has been reclaimed.
void clear_statistics(HeapRegion* r);
void clear_statistics(G1HeapRegion* r);
// Notification for eagerly reclaimed regions to clean up.
void humongous_object_eagerly_reclaimed(HeapRegion* r);
void humongous_object_eagerly_reclaimed(G1HeapRegion* r);
// Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers
// between task-local queues and the global mark stack.
@ -659,8 +659,8 @@ public:
// them.
void scan_root_regions();
bool wait_until_root_region_scan_finished();
void add_root_region(HeapRegion* r);
bool is_root_region(HeapRegion* r);
void add_root_region(G1HeapRegion* r);
bool is_root_region(G1HeapRegion* r);
void root_region_scan_abort_and_wait();
private:
@ -688,7 +688,7 @@ public:
// Clears marks for all objects in the given region in the marking
// bitmap. This should only be used to clean the bitmap during a
// safepoint.
void clear_bitmap_for_region(HeapRegion* hr);
void clear_bitmap_for_region(G1HeapRegion* hr);
// Verify that there are no collection set oops on the stacks (taskqueues /
// global mark stack) and fingers (global / per-task).
@ -758,7 +758,7 @@ private:
G1CMOopClosure* _cm_oop_closure;
// Region this task is scanning, null if we're not scanning any
HeapRegion* _curr_region;
G1HeapRegion* _curr_region;
// Local finger of this task, null if we're not scanning a region
HeapWord* _finger;
// Limit of the region this task is scanning, null if we're not scanning one
@ -806,7 +806,7 @@ private:
// Updates the local fields after this task has claimed
// a new region to scan
void setup_for_region(HeapRegion* hr);
void setup_for_region(G1HeapRegion* hr);
// Makes the limit of the region up-to-date
void update_region_limit();
@ -969,7 +969,7 @@ public:
// The header and footer are printed in the constructor and
// destructor respectively.
G1PrintRegionLivenessInfoClosure(const char* phase_name);
virtual bool do_heap_region(HeapRegion* r);
virtual bool do_heap_region(G1HeapRegion* r);
~G1PrintRegionLivenessInfoClosure();
};
#endif // SHARE_GC_G1_G1CONCURRENTMARK_HPP


@ -71,7 +71,7 @@ inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj
// Some callers may have stale objects to mark above TAMS after humongous reclaim.
// Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
DEBUG_ONLY(HeapRegion* const hr = _g1h->heap_region_containing(obj);)
DEBUG_ONLY(G1HeapRegion* const hr = _g1h->heap_region_containing(obj);)
assert(!hr->is_continues_humongous(),
"Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above TAMS " PTR_FORMAT,
p2i(obj), hr->hrm_index(), p2i(top_at_mark_start(hr)));
@ -184,17 +184,17 @@ inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
return mr.word_size();
}
inline void G1ConcurrentMark::update_top_at_mark_start(HeapRegion* r) {
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_reserved_regions(), "Tried to access TAMS for region %u out of bounds", region);
_top_at_mark_starts[region] = r->top();
}
inline void G1ConcurrentMark::reset_top_at_mark_start(HeapRegion* r) {
inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
_top_at_mark_starts[r->hrm_index()] = r->bottom();
}
inline HeapWord* G1ConcurrentMark::top_at_mark_start(const HeapRegion* r) const {
inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
return top_at_mark_start(r->hrm_index());
}
@ -209,11 +209,11 @@ inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
return cast_from_oop<HeapWord*>(obj) >= top_at_mark_start(region);
}
inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(HeapRegion* r) const {
inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
return _top_at_rebuild_starts[r->hrm_index()];
}
inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) {
inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_reserved_regions(), "Tried to access TARS for region %u out of bounds", region);
assert(_top_at_rebuild_starts[region] == nullptr,


@ -42,6 +42,6 @@ void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_r
return;
}
// We need to clear the bitmap on commit, removing any existing information.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * G1HeapRegion::GrainWords);
_bm->clear_range(mr);
}


@ -36,7 +36,6 @@
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class HeapRegion;
// Closure for iteration over bitmaps
class G1CMBitMapClosure {


@ -60,7 +60,7 @@ size_t G1CMObjArrayProcessor::process_slice(HeapWord* slice) {
// slide is fast enough for "smaller" objects in non-humongous regions, but is slower
// than directly using heap region table.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion* r = g1h->heap_region_containing(slice);
G1HeapRegion* r = g1h->heap_region_containing(slice);
HeapWord* const start_address = r->is_humongous() ?
r->humongous_start_region()->bottom() :


@ -104,7 +104,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// the value may be changed to null during rebuilding if the region has either:
// - been allocated after rebuild start, or
// - been reclaimed by a collection.
bool should_rebuild_or_scrub(HeapRegion* hr) const {
bool should_rebuild_or_scrub(G1HeapRegion* hr) const {
return _cm->top_at_rebuild_start(hr) != nullptr;
}
@ -112,7 +112,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// G1RebuildRemSetChunkSize. The heap region is needed to check whether the region has
// been reclaimed during yielding.
// Returns true if marking has been aborted or false if completed.
bool scan_large_object(HeapRegion* hr, const oop obj, MemRegion scan_range) {
bool scan_large_object(G1HeapRegion* hr, const oop obj, MemRegion scan_range) {
HeapWord* start = scan_range.start();
HeapWord* limit = scan_range.end();
do {
@ -140,7 +140,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Scan for references into regions that need remembered set update for the given
// live object. Returns the offset to the next object.
size_t scan_object(HeapRegion* hr, HeapWord* current) {
size_t scan_object(G1HeapRegion* hr, HeapWord* current) {
oop obj = cast_to_oop(current);
size_t obj_size = obj->size();
@ -166,7 +166,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
}
// Scrub a range of dead objects starting at scrub_start. Will never scrub past limit.
HeapWord* scrub_to_next_live(HeapRegion* hr, HeapWord* scrub_start, HeapWord* limit) {
HeapWord* scrub_to_next_live(G1HeapRegion* hr, HeapWord* scrub_start, HeapWord* limit) {
assert(!_bitmap->is_marked(scrub_start), "Should not scrub live object");
HeapWord* scrub_end = _bitmap->get_next_marked_addr(scrub_start, limit);
@ -178,7 +178,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Scan the given region from bottom to parsable_bottom. Returns whether marking has
// been aborted.
bool scan_and_scrub_to_pb(HeapRegion* hr, HeapWord* start, HeapWord* const limit) {
bool scan_and_scrub_to_pb(G1HeapRegion* hr, HeapWord* start, HeapWord* const limit) {
while (start < limit) {
if (_bitmap->is_marked(start)) {
@ -205,7 +205,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Scan the given region from parsable_bottom to tars. Returns whether marking has
// been aborted.
bool scan_from_pb_to_tars(HeapRegion* hr, HeapWord* start, HeapWord* const limit) {
bool scan_from_pb_to_tars(G1HeapRegion* hr, HeapWord* start, HeapWord* const limit) {
while (start < limit) {
start += scan_object(hr, start);
@ -225,7 +225,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Scan and scrub the given region to tars. Returns whether marking has
// been aborted.
bool scan_and_scrub_region(HeapRegion* hr, HeapWord* const pb) {
bool scan_and_scrub_region(G1HeapRegion* hr, HeapWord* const pb) {
assert(should_rebuild_or_scrub(hr), "must be");
log_trace(gc, marking)("Scrub and rebuild region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT " TAMS: " PTR_FORMAT,
@ -255,7 +255,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Scan a humongous region for remembered set updates. Scans in chunks to avoid
// stalling safepoints. Returns whether the concurrent marking phase has been aborted.
bool scan_humongous_region(HeapRegion* hr, HeapWord* const pb) {
bool scan_humongous_region(G1HeapRegion* hr, HeapWord* const pb) {
assert(should_rebuild_or_scrub(hr), "must be");
if (!_should_rebuild_remset) {
@ -294,7 +294,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
_should_rebuild_remset(should_rebuild_remset),
_processed_words(0) { }
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
// Avoid stalling safepoints and stop iteration if mark cycle has been aborted.
_cm->do_yield_check();
if (_cm->has_aborted()) {

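The chunked scanning in the hunks above is easier to see in isolation. The following is a minimal standalone sketch, not HotSpot code, of the pattern: walk a large range in fixed-size chunks and poll an abort flag between chunks so that a single large object never delays a safepoint. The chunk size, the abort flag and scan_chunk are illustrative stand-ins for G1RebuildRemSetChunkSize, _cm->has_aborted() and the rebuild/scrub closures.

#include <atomic>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins; none of these names are HotSpot identifiers.
static const size_t kChunkWords = 256;
static std::atomic<bool> g_marking_aborted{false};

static void scan_chunk(const size_t* start, const size_t* end) {
  // Placeholder for applying the rebuild-remset closure to one chunk of the object.
  for (const size_t* p = start; p < end; ++p) { (void)*p; }
}

// Scan [start, limit) in fixed-size chunks, polling for abort between chunks so a
// long object never stalls a safepoint. Returns true if the scan was aborted.
bool scan_large_range(const size_t* start, const size_t* limit) {
  const size_t* cur = start;
  while (cur < limit) {
    const size_t* next = cur + kChunkWords;
    if (next > limit) {
      next = limit;
    }
    scan_chunk(cur, next);
    cur = next;
    if (g_marking_aborted.load(std::memory_order_relaxed)) {
      return true;
    }
  }
  return false;
}

int main() {
  static size_t words[10000] = {0};
  std::printf("aborted: %d\n", scan_large_range(words, words + 10000) ? 1 : 0);
}
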
Просмотреть файл

@ -262,7 +262,7 @@ public:
explicit RemSetSamplingClosure(G1CollectionSet* cset) :
_cset(cset), _sampled_card_rs_length(0), _sampled_code_root_rs_length(0) {}
bool do_heap_region(HeapRegion* r) override {
bool do_heap_region(G1HeapRegion* r) override {
HeapRegionRemSet* rem_set = r->rem_set();
_sampled_card_rs_length += rem_set->occupied();
_sampled_code_root_rs_length += rem_set->code_roots_list_length();
@ -317,7 +317,7 @@ bool G1ConcurrentRefine::adjust_threads_periodically() {
size_t used_bytes = _policy->estimate_used_young_bytes_locked();
Heap_lock->unlock();
adjust_young_list_target_length();
size_t young_bytes = _policy->young_list_target_length() * HeapRegion::GrainBytes;
size_t young_bytes = _policy->young_list_target_length() * G1HeapRegion::GrainBytes;
size_t available_bytes = young_bytes - MIN2(young_bytes, used_bytes);
adjust_threads_wanted(available_bytes);
_needs_adjust = false;

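The headroom computation above is short but easy to misread: the MIN2 clamp guards against used_bytes having already passed the young target, which would otherwise underflow the unsigned subtraction. A standalone sketch with invented numbers:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // All numbers are illustrative; GrainBytes and the target length come from the policy.
  const size_t grain_bytes   = 4u * 1024 * 1024;   // assume 4 MB regions
  const size_t target_length = 100;                // young list target, in regions
  const size_t used_bytes    = 450u * 1024 * 1024; // already allocated in young gen

  size_t young_bytes = target_length * grain_bytes;
  // Clamp before subtracting: if allocation already passed the target, the
  // headroom is zero rather than a wrapped-around unsigned value.
  size_t available_bytes = young_bytes - std::min(young_bytes, used_bytes);

  std::printf("young=%zu used=%zu available=%zu bytes\n",
              young_bytes, used_bytes, available_bytes);
}
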
Просмотреть файл

@ -60,7 +60,7 @@ void G1ConcurrentRefineThreadsNeeded::update(uint active_threads,
// Estimate time until next GC, based on remaining bytes available for
// allocation and the allocation rate.
double alloc_region_rate = analytics->predict_alloc_rate_ms();
double alloc_bytes_rate = alloc_region_rate * HeapRegion::GrainBytes;
double alloc_bytes_rate = alloc_region_rate * G1HeapRegion::GrainBytes;
if (alloc_bytes_rate == 0.0) {
// A zero rate indicates we don't yet have data to use for predictions.
// Since we don't have any idea how long until the next GC, use a time of

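For the refinement-thread sizing above, the predicted allocation rate is per region (hence the multiplication by the region size), and dividing the remaining headroom by the resulting bytes-per-millisecond rate gives an estimate of the time until the next young GC. A sketch with invented numbers; the zero-rate fallback mirrors the comment above:

#include <cstdio>

int main() {
  // Illustrative values; the real code takes the rate from the analytics object.
  const double grain_bytes       = 4.0 * 1024 * 1024;  // assume 4 MB regions
  const double alloc_region_rate = 0.05;               // predicted regions per ms
  const double available_bytes   = 200.0 * 1024 * 1024;

  double alloc_bytes_rate = alloc_region_rate * grain_bytes;   // bytes per ms
  if (alloc_bytes_rate == 0.0) {
    // No data yet, so no usable prediction; fall back to a default time estimate.
    std::puts("no allocation-rate data yet");
  } else {
    std::printf("estimated %.0f ms until the next young GC\n",
                available_bytes / alloc_bytes_rate);
  }
}
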
Просмотреть файл

@ -41,7 +41,7 @@ private:
public:
G1EdenRegions() : _length(0), _used_bytes(0), _regions_on_node() { }
uint add(HeapRegion* hr) {
uint add(G1HeapRegion* hr) {
assert(!hr->is_eden(), "should not already be set");
_length++;
return _regions_on_node.add(hr);

Просмотреть файл

@ -60,7 +60,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
_evac_failed_regions[offset] = region_idx;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion* hr = g1h->region_at(region_idx);
G1HeapRegion* hr = g1h->region_at(region_idx);
hr->note_evacuation_failure();
}

Просмотреть файл

@ -149,7 +149,7 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
}
_serial_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
_humongous_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
_region_attr_table.initialize(heap->reserved(), HeapRegion::GrainBytes);
_region_attr_table.initialize(heap->reserved(), G1HeapRegion::GrainBytes);
}
G1FullCollector::~G1FullCollector() {
@ -170,7 +170,7 @@ class PrepareRegionsClosure : public HeapRegionClosure {
public:
PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
hr->prepare_for_full_gc();
G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
_collector->before_marking_update_attribute_table(hr);
@ -255,7 +255,7 @@ void G1FullCollector::complete_collection() {
_heap->print_heap_after_full_collection();
}
void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
void G1FullCollector::before_marking_update_attribute_table(G1HeapRegion* hr) {
if (hr->is_free()) {
_region_attr_table.set_free(hr->hrm_index());
} else if (hr->is_humongous() || hr->has_pinned_objects()) {
@ -419,7 +419,7 @@ void G1FullCollector::phase2c_prepare_serial_compaction() {
G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
assert(!serial_cp->is_initialized(), "sanity!");
HeapRegion* start_hr = _heap->region_at(start_serial);
G1HeapRegion* start_hr = _heap->region_at(start_serial);
serial_cp->add(start_hr);
serial_cp->initialize(start_hr);
@ -428,7 +428,7 @@ void G1FullCollector::phase2c_prepare_serial_compaction() {
for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
if (is_compaction_target(i)) {
HeapRegion* current = _heap->region_at(i);
G1HeapRegion* current = _heap->region_at(i);
set_compaction_top(current, current->bottom());
serial_cp->add(current);
current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
@ -449,7 +449,7 @@ void G1FullCollector::phase2d_prepare_humongous_compaction() {
G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();
while (region_index < max_reserved_regions) {
HeapRegion* hr = _heap->region_at_or_null(region_index);
G1HeapRegion* hr = _heap->region_at_or_null(region_index);
if (hr == nullptr) {
region_index++;

Просмотреть файл

@ -45,7 +45,7 @@ class G1FullGCMarker;
class G1FullGCScope;
class G1FullGCCompactionPoint;
class GCMemoryManager;
class HeapRegion;
class G1HeapRegion;
class ReferenceProcessor;
// Subject-to-discovery closure for reference processing during Full GC. During
@ -87,7 +87,7 @@ class G1FullCollector : StackObj {
G1IsAliveClosure _is_alive;
ReferenceProcessorIsAliveMutator _is_alive_mutator;
G1RegionMarkStats* _live_stats;
GrowableArrayCHeap<HeapRegion*, mtGC> _humongous_compaction_regions;
GrowableArrayCHeap<G1HeapRegion*, mtGC> _humongous_compaction_regions;
static uint calc_active_workers();
@ -125,7 +125,7 @@ public:
return _live_stats[region_index]._live_words;
}
void before_marking_update_attribute_table(HeapRegion* hr);
void before_marking_update_attribute_table(G1HeapRegion* hr);
inline bool is_compacting(oop obj) const;
inline bool is_skip_compacting(uint region_index) const;
@ -138,14 +138,14 @@ public:
inline void update_from_compacting_to_skip_compacting(uint region_idx);
inline void update_from_skip_compacting_to_compacting(uint region_idx);
inline void set_compaction_top(HeapRegion* r, HeapWord* value);
inline HeapWord* compaction_top(HeapRegion* r) const;
inline void set_compaction_top(G1HeapRegion* r, HeapWord* value);
inline HeapWord* compaction_top(G1HeapRegion* r) const;
inline void set_has_compaction_targets();
inline bool has_compaction_targets() const;
inline void add_humongous_region(HeapRegion* hr);
inline GrowableArrayCHeap<HeapRegion*, mtGC>& humongous_compaction_regions();
inline void add_humongous_region(G1HeapRegion* hr);
inline GrowableArrayCHeap<G1HeapRegion*, mtGC>& humongous_compaction_regions();
uint truncate_parallel_cps();

Просмотреть файл

@ -62,11 +62,11 @@ void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx)
_region_attr_table.set_compacting(region_idx);
}
void G1FullCollector::set_compaction_top(HeapRegion* r, HeapWord* value) {
void G1FullCollector::set_compaction_top(G1HeapRegion* r, HeapWord* value) {
Atomic::store(&_compaction_tops[r->hrm_index()], value);
}
HeapWord* G1FullCollector::compaction_top(HeapRegion* r) const {
HeapWord* G1FullCollector::compaction_top(G1HeapRegion* r) const {
return Atomic::load(&_compaction_tops[r->hrm_index()]);
}
@ -90,11 +90,11 @@ bool G1FullCollector::has_humongous() {
return _has_humongous;
}
void G1FullCollector::add_humongous_region(HeapRegion* hr) {
void G1FullCollector::add_humongous_region(G1HeapRegion* hr) {
_humongous_compaction_regions.append(hr);
}
GrowableArrayCHeap<HeapRegion*, mtGC>& G1FullCollector::humongous_compaction_regions() {
GrowableArrayCHeap<G1HeapRegion*, mtGC>& G1FullCollector::humongous_compaction_regions() {
return _humongous_compaction_regions;
}

Просмотреть файл

@ -61,7 +61,7 @@ class G1AdjustRegionClosure : public HeapRegionClosure {
_bitmap(collector->mark_bitmap()),
_worker_id(worker_id) { }
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
G1AdjustClosure cl(_collector);
if (r->is_humongous()) {
// Special handling for humongous regions to get somewhat better

Просмотреть файл

@ -66,7 +66,7 @@ void G1FullGCCompactTask::copy_object_to_new_location(oop obj) {
assert(cast_to_oop(destination)->klass() != nullptr, "should have a class");
}
void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
void G1FullGCCompactTask::compact_region(G1HeapRegion* hr) {
assert(!hr->has_pinned_objects(), "Should be no region with pinned objects in compaction queue");
assert(!hr->is_humongous(), "Should be no humongous regions in compaction queue");
@ -87,8 +87,8 @@ void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
void G1FullGCCompactTask::work(uint worker_id) {
Ticks start = Ticks::now();
GrowableArray<HeapRegion*>* compaction_queue = collector()->compaction_point(worker_id)->regions();
for (GrowableArrayIterator<HeapRegion*> it = compaction_queue->begin();
GrowableArray<G1HeapRegion*>* compaction_queue = collector()->compaction_point(worker_id)->regions();
for (GrowableArrayIterator<G1HeapRegion*> it = compaction_queue->begin();
it != compaction_queue->end();
++it) {
compact_region(*it);
@ -97,8 +97,8 @@ void G1FullGCCompactTask::work(uint worker_id) {
void G1FullGCCompactTask::serial_compaction() {
GCTraceTime(Debug, gc, phases) tm("Phase 4: Serial Compaction", collector()->scope()->timer());
GrowableArray<HeapRegion*>* compaction_queue = collector()->serial_compaction_point()->regions();
for (GrowableArrayIterator<HeapRegion*> it = compaction_queue->begin();
GrowableArray<G1HeapRegion*>* compaction_queue = collector()->serial_compaction_point()->regions();
for (GrowableArrayIterator<G1HeapRegion*> it = compaction_queue->begin();
it != compaction_queue->end();
++it) {
compact_region(*it);
@ -108,13 +108,13 @@ void G1FullGCCompactTask::serial_compaction() {
void G1FullGCCompactTask::humongous_compaction() {
GCTraceTime(Debug, gc, phases) tm("Phase 4: Humongous Compaction", collector()->scope()->timer());
for (HeapRegion* hr : collector()->humongous_compaction_regions()) {
for (G1HeapRegion* hr : collector()->humongous_compaction_regions()) {
assert(collector()->is_compaction_target(hr->hrm_index()), "Sanity");
compact_humongous_obj(hr);
}
}
void G1FullGCCompactTask::compact_humongous_obj(HeapRegion* src_hr) {
void G1FullGCCompactTask::compact_humongous_obj(G1HeapRegion* src_hr) {
assert(src_hr->is_starts_humongous(), "Should be start region of the humongous object");
oop obj = cast_to_oop(src_hr->bottom());
@ -146,7 +146,7 @@ void G1FullGCCompactTask::free_non_overlapping_regions(uint src_start_idx, uint
dest_end_idx + 1;
for (uint i = non_overlapping_start; i <= src_end_idx; ++i) {
HeapRegion* hr = _g1h->region_at(i);
G1HeapRegion* hr = _g1h->region_at(i);
_g1h->free_humongous_region(hr, nullptr);
}
}

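The index arithmetic in free_non_overlapping_regions is the subtle part of humongous compaction: after the object has been copied to a lower destination range, the source range may partially overlap it, and only the source regions past the destination end may be freed. A standalone sketch of just that arithmetic, with invented region indices:

#include <cstdio>

// First source region index that may be freed after moving a humongous object
// whose source range starts at src_start and whose destination range ends at dest_end.
unsigned first_freeable(unsigned src_start, unsigned dest_end) {
  // No overlap: the whole source range is freeable. Overlap: skip the shared prefix.
  return dest_end < src_start ? src_start : dest_end + 1;
}

int main() {
  // Invented indices: the object occupied regions 10..13 and was copied to 8..11.
  unsigned src_start = 10, src_end = 13, dest_end = 11;
  for (unsigned i = first_freeable(src_start, dest_end); i <= src_end; ++i) {
    std::printf("free region %u\n", i);   // prints 12 and 13
  }
}
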
Просмотреть файл

@ -40,8 +40,8 @@ class G1FullGCCompactTask : public G1FullGCTask {
HeapRegionClaimer _claimer;
G1CollectedHeap* _g1h;
void compact_region(HeapRegion* hr);
void compact_humongous_obj(HeapRegion* hr);
void compact_region(G1HeapRegion* hr);
void compact_humongous_obj(G1HeapRegion* hr);
void free_non_overlapping_regions(uint src_start_idx, uint dest_start_idx, uint num_regions);
static void copy_object_to_new_location(oop obj);

Просмотреть файл

@ -35,7 +35,7 @@ G1FullGCCompactionPoint::G1FullGCCompactionPoint(G1FullCollector* collector, Pre
_current_region(nullptr),
_compaction_top(nullptr),
_preserved_stack(preserved_stack) {
_compaction_regions = new (mtGC) GrowableArray<HeapRegion*>(32, mtGC);
_compaction_regions = new (mtGC) GrowableArray<G1HeapRegion*>(32, mtGC);
_compaction_region_iterator = _compaction_regions->begin();
}
@ -61,22 +61,22 @@ bool G1FullGCCompactionPoint::is_initialized() {
return _current_region != nullptr;
}
void G1FullGCCompactionPoint::initialize(HeapRegion* hr) {
void G1FullGCCompactionPoint::initialize(G1HeapRegion* hr) {
_current_region = hr;
initialize_values();
}
HeapRegion* G1FullGCCompactionPoint::current_region() {
G1HeapRegion* G1FullGCCompactionPoint::current_region() {
return *_compaction_region_iterator;
}
HeapRegion* G1FullGCCompactionPoint::next_region() {
HeapRegion* next = *(++_compaction_region_iterator);
G1HeapRegion* G1FullGCCompactionPoint::next_region() {
G1HeapRegion* next = *(++_compaction_region_iterator);
assert(next != nullptr, "Must return valid region");
return next;
}
GrowableArray<HeapRegion*>* G1FullGCCompactionPoint::regions() {
GrowableArray<G1HeapRegion*>* G1FullGCCompactionPoint::regions() {
return _compaction_regions;
}
@ -117,16 +117,16 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
_current_region->update_bot_for_block(_compaction_top - size, _compaction_top);
}
void G1FullGCCompactionPoint::add(HeapRegion* hr) {
void G1FullGCCompactionPoint::add(G1HeapRegion* hr) {
_compaction_regions->append(hr);
}
void G1FullGCCompactionPoint::remove_at_or_above(uint bottom) {
HeapRegion* cur = current_region();
G1HeapRegion* cur = current_region();
assert(cur->hrm_index() >= bottom, "Sanity!");
int start_index = 0;
for (HeapRegion* r : *_compaction_regions) {
for (G1HeapRegion* r : *_compaction_regions) {
if (r->hrm_index() < bottom) {
start_index++;
}
@ -136,20 +136,20 @@ void G1FullGCCompactionPoint::remove_at_or_above(uint bottom) {
_compaction_regions->trunc_to(start_index);
}
void G1FullGCCompactionPoint::add_humongous(HeapRegion* hr) {
void G1FullGCCompactionPoint::add_humongous(G1HeapRegion* hr) {
assert(hr->is_starts_humongous(), "Sanity!");
_collector->add_humongous_region(hr);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->humongous_obj_regions_iterate(hr,
[&] (HeapRegion* r) {
[&] (G1HeapRegion* r) {
add(r);
_collector->update_from_skip_compacting_to_compacting(r->hrm_index());
});
}
void G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) {
assert(hr->is_starts_humongous(), "Sanity!");
oop obj = cast_to_oop(hr->bottom());
@ -171,7 +171,7 @@ void G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
// Preserve the mark for the humongous object as the region was initially not compacting.
preserved_stack()->push_if_necessary(obj, obj->mark());
HeapRegion* dest_hr = _compaction_regions->at(range_begin);
G1HeapRegion* dest_hr = _compaction_regions->at(range_begin);
obj->forward_to(cast_to_oop(dest_hr->bottom()));
assert(obj->is_forwarded(), "Object must be forwarded!");
@ -184,7 +184,7 @@ void G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
return;
}
uint G1FullGCCompactionPoint::find_contiguous_before(HeapRegion* hr, uint num_regions) {
uint G1FullGCCompactionPoint::find_contiguous_before(G1HeapRegion* hr, uint num_regions) {
assert(num_regions > 0, "Sanity!");
assert(has_regions(), "Sanity!");

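The compaction point above is essentially a cursor over an ordered list of destination regions: forward() records where an object will move and bumps the destination top, switching to the next region once the object no longer fits. A self-contained toy version of that bookkeeping (fixed-size regions, no BOT updates or preserved marks) to make the flow concrete:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

struct ToyRegion {
  size_t capacity_words;
  size_t top_words;          // destination allocation cursor within the region
};

struct ToyCompactionPoint {
  std::vector<ToyRegion>* regions;
  size_t current = 0;

  // Record where an object of `size` words will move: bump the destination top,
  // switching to the next destination region when the object no longer fits.
  std::pair<size_t, size_t> forward(size_t size) {
    while ((*regions)[current].top_words + size > (*regions)[current].capacity_words) {
      ++current;             // toy objects never exceed one region
    }
    ToyRegion& r = (*regions)[current];
    size_t offset = r.top_words;
    r.top_words += size;
    return {current, offset};
  }
};

int main() {
  std::vector<ToyRegion> regions(3, ToyRegion{512, 0});
  ToyCompactionPoint cp{&regions};
  for (size_t obj : {300, 300, 200, 500}) {
    std::pair<size_t, size_t> dest = cp.forward(obj);
    std::printf("%zu words -> region %zu @ offset %zu\n", obj, dest.first, dest.second);
  }
}
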
Просмотреть файл

@ -31,22 +31,22 @@
#include "utilities/pair.hpp"
class G1FullCollector;
class HeapRegion;
class G1HeapRegion;
class PreservedMarks;
class G1FullGCCompactionPoint : public CHeapObj<mtGC> {
G1FullCollector* _collector;
HeapRegion* _current_region;
HeapWord* _compaction_top;
G1HeapRegion* _current_region;
HeapWord* _compaction_top;
PreservedMarks* _preserved_stack;
GrowableArray<HeapRegion*>* _compaction_regions;
GrowableArrayIterator<HeapRegion*> _compaction_region_iterator;
GrowableArray<G1HeapRegion*>* _compaction_regions;
GrowableArrayIterator<G1HeapRegion*> _compaction_region_iterator;
bool object_will_fit(size_t size);
void initialize_values();
void switch_region();
HeapRegion* next_region();
uint find_contiguous_before(HeapRegion* hr, uint num_regions);
G1HeapRegion* next_region();
uint find_contiguous_before(G1HeapRegion* hr, uint num_regions);
public:
G1FullGCCompactionPoint(G1FullCollector* collector, PreservedMarks* preserved_stack);
@ -54,17 +54,17 @@ public:
bool has_regions();
bool is_initialized();
void initialize(HeapRegion* hr);
void initialize(G1HeapRegion* hr);
void update();
void forward(oop object, size_t size);
void forward_humongous(HeapRegion* hr);
void add(HeapRegion* hr);
void add_humongous(HeapRegion* hr);
void forward_humongous(G1HeapRegion* hr);
void add(G1HeapRegion* hr);
void add_humongous(G1HeapRegion* hr);
void remove_at_or_above(uint bottom);
HeapRegion* current_region();
G1HeapRegion* current_region();
GrowableArray<HeapRegion*>* regions();
GrowableArray<G1HeapRegion*>* regions();
PreservedMarks* preserved_stack() const {
assert(_preserved_stack != nullptr, "must be initialized");

Просмотреть файл

@ -34,7 +34,7 @@
// the table specifies whether a Full GC cycle should be compacting or skip
// compacting a region.
// Reasons for not compacting a region:
// (1) the HeapRegion itself can not be moved during this phase of the full gc
// (1) the G1HeapRegion itself can not be moved during this phase of the full gc
// (e.g. Humongous regions).
// (2) the occupancy of the region is too high to be considered eligible for compaction.
class G1FullGCHeapRegionAttr : public G1BiasedMappedArray<uint8_t> {

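G1FullGCHeapRegionAttr keeps one byte of state per region, and because the table is initialized over the heap's reserved range with GrainBytes granularity, the entry for a region can be reached either by region index or from any address inside the region. Below is a plain-vector imitation of that address-to-attribute lookup; the attribute values, base address and shift are assumptions, not the real G1BiasedMappedArray machinery:

#include <cstdint>
#include <cstddef>
#include <cstdio>
#include <vector>

enum class RegionAttr : uint8_t { Free, SkipCompacting, Compacting };

// A plain-vector imitation of an address-indexed per-region attribute table.
struct ToyRegionAttrTable {
  uintptr_t heap_base;
  unsigned  log_region_bytes;          // plays the role of LogOfHRGrainBytes
  std::vector<RegionAttr> attrs;

  size_t index_for(uintptr_t addr) const {
    return (addr - heap_base) >> log_region_bytes;
  }
  void set(uintptr_t addr, RegionAttr a) { attrs[index_for(addr)] = a; }
  RegionAttr get(uintptr_t addr) const   { return attrs[index_for(addr)]; }
};

int main() {
  // Pretend heap: 8 regions of 1 MB starting at an arbitrary base address.
  const uintptr_t base = 0x40000000u;
  ToyRegionAttrTable table{base, 20, std::vector<RegionAttr>(8, RegionAttr::Free)};

  table.set(base + 3 * (1u << 20), RegionAttr::Compacting);
  // Any address inside region 3 maps to the same entry.
  std::printf("attr of region 3: %d\n", (int)table.get(base + 3 * (1u << 20) + 4096));
}
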
Просмотреть файл

@ -43,7 +43,7 @@ G1DetermineCompactionQueueClosure::G1DetermineCompactionQueueClosure(G1FullColle
_collector(collector),
_cur_worker(0) { }
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(G1HeapRegion* hr) {
uint region_idx = hr->hrm_index();
assert(_collector->is_compaction_target(region_idx), "must be");
@ -78,7 +78,7 @@ void G1FullGCPrepareTask::work(uint worker_id) {
G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
G1CalculatePointersClosure closure(collector(), compaction_point);
for (GrowableArrayIterator<HeapRegion*> it = compaction_point->regions()->begin();
for (GrowableArrayIterator<G1HeapRegion*> it = compaction_point->regions()->begin();
it != compaction_point->regions()->end();
++it) {
closure.do_heap_region(*it);
@ -113,7 +113,7 @@ size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
return size;
}
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(G1HeapRegion* hr) {
if (!_collector->is_free(hr->hrm_index())) {
G1PrepareCompactLiveClosure prepare_compact(_cp);
hr->apply_to_marked_objects(_bitmap, &prepare_compact);

Просмотреть файл

@ -33,7 +33,7 @@ class G1CollectedHeap;
class G1CMBitMap;
class G1FullCollector;
class G1FullGCCompactionPoint;
class HeapRegion;
class G1HeapRegion;
// Determines the regions in the heap that should be part of the compaction and
// distributes them among the compaction queues in round-robin fashion.
@ -42,9 +42,9 @@ class G1DetermineCompactionQueueClosure : public HeapRegionClosure {
G1FullCollector* _collector;
uint _cur_worker;
inline void free_empty_humongous_region(HeapRegion* hr);
inline void free_empty_humongous_region(G1HeapRegion* hr);
inline bool should_compact(HeapRegion* hr) const;
inline bool should_compact(G1HeapRegion* hr) const;
// Returns the current worker id to assign a compaction point to, and selects
// the next one round-robin style.
@ -52,12 +52,12 @@ class G1DetermineCompactionQueueClosure : public HeapRegionClosure {
inline G1FullGCCompactionPoint* next_compaction_point();
inline void add_to_compaction_queue(HeapRegion* hr);
inline void add_to_compaction_queue(G1HeapRegion* hr);
public:
G1DetermineCompactionQueueClosure(G1FullCollector* collector);
inline bool do_heap_region(HeapRegion* hr) override;
inline bool do_heap_region(G1HeapRegion* hr) override;
};
class G1FullGCPrepareTask : public G1FullGCTask {
@ -80,13 +80,13 @@ private:
G1CMBitMap* _bitmap;
G1FullGCCompactionPoint* _cp;
void prepare_for_compaction(HeapRegion* hr);
void prepare_for_compaction(G1HeapRegion* hr);
public:
G1CalculatePointersClosure(G1FullCollector* collector,
G1FullGCCompactionPoint* cp);
bool do_heap_region(HeapRegion* hr);
bool do_heap_region(G1HeapRegion* hr);
};
class G1PrepareCompactLiveClosure : public StackObj {

Просмотреть файл

@ -33,13 +33,13 @@
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
void G1DetermineCompactionQueueClosure::free_empty_humongous_region(HeapRegion* hr) {
void G1DetermineCompactionQueueClosure::free_empty_humongous_region(G1HeapRegion* hr) {
_g1h->free_humongous_region(hr, nullptr);
_collector->set_free(hr->hrm_index());
add_to_compaction_queue(hr);
}
inline bool G1DetermineCompactionQueueClosure::should_compact(HeapRegion* hr) const {
inline bool G1DetermineCompactionQueueClosure::should_compact(G1HeapRegion* hr) const {
// There is no need to iterate and forward objects in non-movable regions, i.e.
// prepare them for compaction.
if (hr->is_humongous() || hr->has_pinned_objects()) {
@ -61,7 +61,7 @@ inline G1FullGCCompactionPoint* G1DetermineCompactionQueueClosure::next_compacti
return _collector->compaction_point(next_worker());
}
inline void G1DetermineCompactionQueueClosure::add_to_compaction_queue(HeapRegion* hr) {
inline void G1DetermineCompactionQueueClosure::add_to_compaction_queue(G1HeapRegion* hr) {
_collector->set_compaction_top(hr, hr->bottom());
_collector->set_has_compaction_targets();
@ -73,12 +73,12 @@ inline void G1DetermineCompactionQueueClosure::add_to_compaction_queue(HeapRegio
cp->add(hr);
}
static bool has_pinned_objects(HeapRegion* hr) {
static bool has_pinned_objects(G1HeapRegion* hr) {
return hr->has_pinned_objects() ||
(hr->is_humongous() && hr->humongous_start_region()->has_pinned_objects());
}
inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
inline bool G1DetermineCompactionQueueClosure::do_heap_region(G1HeapRegion* hr) {
if (should_compact(hr)) {
assert(!hr->is_humongous(), "moving humongous objects not supported.");
add_to_compaction_queue(hr);

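The queue selection above reduces to two checks: regions that cannot be moved in this phase (humongous or containing pinned objects) are never compacted, and regions whose live data exceeds a per-region threshold are left as skip-compacting. A standalone sketch of that decision; the threshold and the live-word counts are invented, the real cutoff being the compaction threshold set up in G1FullGCScope further down:

#include <cstddef>
#include <cstdio>

struct ToyRegionInfo {
  bool humongous;
  bool has_pinned_objects;
  size_t live_words;
};

// Decide whether a region is worth compacting, mirroring the two-step check above.
bool should_compact(const ToyRegionInfo& r, size_t live_words_threshold) {
  if (r.humongous || r.has_pinned_objects) {
    return false;                                 // non-movable in this phase
  }
  return r.live_words <= live_words_threshold;    // dense regions are skip-compacting
}

int main() {
  const size_t threshold = 450 * 1024;            // invented live-word cutoff
  ToyRegionInfo a{false, false, 100 * 1024};      // sparse    -> compact
  ToyRegionInfo b{false, false, 500 * 1024};      // dense     -> skip
  ToyRegionInfo c{true,  false,  10 * 1024};      // humongous -> skip
  std::printf("%d %d %d\n", should_compact(a, threshold),
              should_compact(b, threshold), should_compact(c, threshold));
}
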
Просмотреть файл

@ -31,12 +31,12 @@ G1FullGCResetMetadataTask::G1ResetMetadataClosure::G1ResetMetadataClosure(G1Full
_g1h(G1CollectedHeap::heap()),
_collector(collector) { }
void G1FullGCResetMetadataTask::G1ResetMetadataClosure::reset_region_metadata(HeapRegion* hr) {
void G1FullGCResetMetadataTask::G1ResetMetadataClosure::reset_region_metadata(G1HeapRegion* hr) {
hr->rem_set()->clear();
hr->clear_cardtable();
}
bool G1FullGCResetMetadataTask::G1ResetMetadataClosure::do_heap_region(HeapRegion* hr) {
bool G1FullGCResetMetadataTask::G1ResetMetadataClosure::do_heap_region(G1HeapRegion* hr) {
uint const region_idx = hr->hrm_index();
if (!_collector->is_compaction_target(region_idx)) {
assert(!hr->is_free(), "all free regions should be compaction targets");
@ -54,7 +54,7 @@ bool G1FullGCResetMetadataTask::G1ResetMetadataClosure::do_heap_region(HeapRegio
return false;
}
void G1FullGCResetMetadataTask::G1ResetMetadataClosure::scrub_skip_compacting_region(HeapRegion* hr, bool update_bot_for_live) {
void G1FullGCResetMetadataTask::G1ResetMetadataClosure::scrub_skip_compacting_region(G1HeapRegion* hr, bool update_bot_for_live) {
assert(hr->needs_scrubbing_during_full_gc(), "must be");
HeapWord* limit = hr->top();
@ -82,7 +82,7 @@ void G1FullGCResetMetadataTask::G1ResetMetadataClosure::scrub_skip_compacting_re
}
}
void G1FullGCResetMetadataTask::G1ResetMetadataClosure::reset_skip_compacting(HeapRegion* hr) {
void G1FullGCResetMetadataTask::G1ResetMetadataClosure::reset_skip_compacting(G1HeapRegion* hr) {
#ifdef ASSERT
uint region_index = hr->hrm_index();
assert(_collector->is_skip_compacting(region_index), "Only call on is_skip_compacting regions");

Просмотреть файл

@ -35,18 +35,18 @@ class G1FullGCResetMetadataTask : public G1FullGCTask {
G1CollectedHeap* _g1h;
G1FullCollector* _collector;
void reset_region_metadata(HeapRegion* hr);
void reset_region_metadata(G1HeapRegion* hr);
// Scrub all runs of dead objects within the given region by putting filler
// objects and updating the corresponding BOT. If update_bot_for_live is true,
// also update the BOT for live objects.
void scrub_skip_compacting_region(HeapRegion* hr, bool update_bot_for_live);
void scrub_skip_compacting_region(G1HeapRegion* hr, bool update_bot_for_live);
void reset_skip_compacting(HeapRegion* r);
void reset_skip_compacting(G1HeapRegion* r);
public:
G1ResetMetadataClosure(G1FullCollector* collector);
bool do_heap_region(HeapRegion* hr);
bool do_heap_region(G1HeapRegion* hr);
};
public:

Просмотреть файл

@ -52,8 +52,8 @@ G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support,
_monitoring_scope(monitoring_support),
_heap_printer(_g1h),
_region_compaction_threshold(do_maximal_compaction ?
HeapRegion::GrainWords :
(1 - MarkSweepDeadRatio / 100.0) * HeapRegion::GrainWords) { }
G1HeapRegion::GrainWords :
(1 - MarkSweepDeadRatio / 100.0) * G1HeapRegion::GrainWords) { }
bool G1FullGCScope::should_clear_soft_refs() {
return _soft_refs.should_clear();

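The _region_compaction_threshold initialized above is that live-data cutoff: under maximal compaction every region qualifies (the threshold is the whole region in words), otherwise a region qualifies only if at least MarkSweepDeadRatio percent of it is dead. A quick numeric check with illustrative values (MarkSweepDeadRatio is a HotSpot tunable; 5.0 is just an example):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_words = 4 * 1024 * 1024 / 8;   // 4 MB region, 8-byte words
  const double mark_sweep_dead_ratio = 5.0;         // example value of the tunable

  size_t maximal = grain_words;                                          // compact everything
  size_t normal  = (size_t)((1 - mark_sweep_dead_ratio / 100.0) * grain_words);

  std::printf("threshold (maximal): %zu words\n", maximal);
  std::printf("threshold (normal) : %zu words (%.0f%% of the region)\n",
              normal, 100.0 * normal / grain_words);
}
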
Просмотреть файл

@ -48,21 +48,21 @@
#include "runtime/globals_extension.hpp"
#include "utilities/powerOfTwo.hpp"
uint HeapRegion::LogOfHRGrainBytes = 0;
uint HeapRegion::LogCardsPerRegion = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
uint G1HeapRegion::LogOfHRGrainBytes = 0;
uint G1HeapRegion::LogCardsPerRegion = 0;
size_t G1HeapRegion::GrainBytes = 0;
size_t G1HeapRegion::GrainWords = 0;
size_t G1HeapRegion::CardsPerRegion = 0;
size_t HeapRegion::max_region_size() {
size_t G1HeapRegion::max_region_size() {
return HeapRegionBounds::max_size();
}
size_t HeapRegion::min_region_size_in_words() {
size_t G1HeapRegion::min_region_size_in_words() {
return HeapRegionBounds::min_size() >> LogHeapWordSize;
}
void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
void G1HeapRegion::setup_heap_region_size(size_t max_heap_size) {
size_t region_size = G1HeapRegionSize;
// G1HeapRegionSize = 0 means decide ergonomically.
if (region_size == 0) {
@ -98,7 +98,7 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
}
}
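For context on the statics being defined here: when G1HeapRegionSize is zero the region size is derived ergonomically from the maximum heap size, rounded to a power of two and clamped to the allowed bounds, and GrainWords, CardsPerRegion and LogCardsPerRegion then follow directly from GrainBytes. The sketch below reproduces that derivation in standalone form; the target region count, the bounds and the card size are assumptions rather than the exact HotSpot constants:

#include <cstddef>
#include <cstdio>

// Round down to a power of two (adequate for this sketch).
static size_t floor_pow2(size_t v) {
  size_t p = 1;
  while (p * 2 <= v) p *= 2;
  return p;
}

int main() {
  const size_t target_regions   = 2048;            // assumed target region count
  const size_t min_region_bytes = 1u << 20;        // assumed lower bound (1 MB)
  const size_t max_region_bytes = 32u << 20;       // assumed upper bound for this sketch
  const size_t heap_word_size   = 8;
  const unsigned log_card_size  = 9;               // assumed 512-byte cards

  size_t max_heap = 8ull * 1024 * 1024 * 1024;     // example: -Xmx8g

  // Ergonomic choice: heap size / target count, clamped and rounded to a power of two.
  size_t grain_bytes = floor_pow2(max_heap / target_regions);
  if (grain_bytes < min_region_bytes) grain_bytes = min_region_bytes;
  if (grain_bytes > max_region_bytes) grain_bytes = max_region_bytes;

  unsigned log_grain = 0;
  while ((size_t(1) << (log_grain + 1)) <= grain_bytes) log_grain++;

  size_t grain_words      = grain_bytes / heap_word_size;
  size_t cards_per_region = grain_bytes >> log_card_size;
  unsigned log_cards      = log_grain - log_card_size;

  std::printf("GrainBytes=%zu LogOfHRGrainBytes=%u GrainWords=%zu "
              "CardsPerRegion=%zu LogCardsPerRegion=%u\n",
              grain_bytes, log_grain, grain_words, cards_per_region, log_cards);
}
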
void HeapRegion::handle_evacuation_failure(bool retain) {
void G1HeapRegion::handle_evacuation_failure(bool retain) {
uninstall_surv_rate_group();
clear_young_index_in_cset();
clear_index_in_opt_cset();
@ -108,13 +108,13 @@ void HeapRegion::handle_evacuation_failure(bool retain) {
_rem_set->clear(true /* only_cardset */, retain /* keep_tracked */);
}
void HeapRegion::unlink_from_list() {
void G1HeapRegion::unlink_from_list() {
set_next(nullptr);
set_prev(nullptr);
set_containing_set(nullptr);
}
void HeapRegion::hr_clear(bool clear_space) {
void G1HeapRegion::hr_clear(bool clear_space) {
set_top(bottom());
clear_young_index_in_cset();
clear_index_in_opt_cset();
@ -132,12 +132,12 @@ void HeapRegion::hr_clear(bool clear_space) {
if (clear_space) clear(SpaceDecorator::Mangle);
}
void HeapRegion::clear_cardtable() {
void G1HeapRegion::clear_cardtable() {
G1CardTable* ct = G1CollectedHeap::heap()->card_table();
ct->clear_MemRegion(MemRegion(bottom(), end()));
}
double HeapRegion::calc_gc_efficiency() {
double G1HeapRegion::calc_gc_efficiency() {
// GC efficiency is the ratio of how much space would be
// reclaimed over how long we predict it would take to reclaim it.
G1Policy* policy = G1CollectedHeap::heap()->policy();
@ -149,38 +149,38 @@ double HeapRegion::calc_gc_efficiency() {
return (double)reclaimable_bytes() / region_elapsed_time_ms;
}
void HeapRegion::set_free() {
void G1HeapRegion::set_free() {
report_region_type_change(G1HeapRegionTraceType::Free);
_type.set_free();
}
void HeapRegion::set_eden() {
void G1HeapRegion::set_eden() {
report_region_type_change(G1HeapRegionTraceType::Eden);
_type.set_eden();
}
void HeapRegion::set_eden_pre_gc() {
void G1HeapRegion::set_eden_pre_gc() {
report_region_type_change(G1HeapRegionTraceType::Eden);
_type.set_eden_pre_gc();
}
void HeapRegion::set_survivor() {
void G1HeapRegion::set_survivor() {
report_region_type_change(G1HeapRegionTraceType::Survivor);
_type.set_survivor();
}
void HeapRegion::move_to_old() {
void G1HeapRegion::move_to_old() {
if (_type.relabel_as_old()) {
report_region_type_change(G1HeapRegionTraceType::Old);
}
}
void HeapRegion::set_old() {
void G1HeapRegion::set_old() {
report_region_type_change(G1HeapRegionTraceType::Old);
_type.set_old();
}
void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
void G1HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
assert(!is_humongous(), "sanity / pre-condition");
assert(top() == bottom(), "should be empty");
@ -194,7 +194,7 @@ void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
}
}
void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
void G1HeapRegion::set_continues_humongous(G1HeapRegion* first_hr) {
assert(!is_humongous(), "sanity / pre-condition");
assert(top() == bottom(), "should be empty");
assert(first_hr->is_starts_humongous(), "pre-condition");
@ -204,18 +204,18 @@ void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
_humongous_start_region = first_hr;
}
void HeapRegion::clear_humongous() {
void G1HeapRegion::clear_humongous() {
assert(is_humongous(), "pre-condition");
assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
assert(capacity() == G1HeapRegion::GrainBytes, "pre-condition");
_humongous_start_region = nullptr;
}
void HeapRegion::prepare_remset_for_scan() {
void G1HeapRegion::prepare_remset_for_scan() {
_rem_set->reset_table_scanner();
}
HeapRegion::HeapRegion(uint hrm_index,
G1HeapRegion::G1HeapRegion(uint hrm_index,
G1BlockOffsetTable* bot,
MemRegion mr,
G1CardSetConfiguration* config) :
@ -248,7 +248,7 @@ HeapRegion::HeapRegion(uint hrm_index,
initialize();
}
void HeapRegion::initialize(bool clear_space, bool mangle_space) {
void G1HeapRegion::initialize(bool clear_space, bool mangle_space) {
assert(_rem_set->is_empty(), "Remembered set must be empty");
if (clear_space) {
@ -260,7 +260,7 @@ void HeapRegion::initialize(bool clear_space, bool mangle_space) {
hr_clear(false /*clear_space*/);
}
void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
void G1HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
HeapRegionTracer::send_region_type_change(_hrm_index,
get_trace_type(),
to,
@ -268,7 +268,7 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
used());
}
void HeapRegion::note_evacuation_failure() {
void G1HeapRegion::note_evacuation_failure() {
// PB must be bottom - we only evacuate old gen regions after scrubbing, and
// young gen regions never have their PB set to anything other than bottom.
assert(parsable_bottom_acquire() == bottom(), "must be");
@ -276,25 +276,25 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
_garbage_bytes = 0;
}
void HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
void G1HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
Atomic::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed);
}
// Code roots support
void HeapRegion::add_code_root(nmethod* nm) {
void G1HeapRegion::add_code_root(nmethod* nm) {
rem_set()->add_code_root(nm);
}
void HeapRegion::remove_code_root(nmethod* nm) {
void G1HeapRegion::remove_code_root(nmethod* nm) {
rem_set()->remove_code_root(nm);
}
void HeapRegion::code_roots_do(NMethodClosure* blk) const {
void G1HeapRegion::code_roots_do(NMethodClosure* blk) const {
rem_set()->code_roots_do(blk);
}
class VerifyCodeRootOopClosure: public OopClosure {
const HeapRegion* _hr;
const G1HeapRegion* _hr;
bool _failures;
bool _has_oops_in_region;
@ -321,7 +321,7 @@ class VerifyCodeRootOopClosure: public OopClosure {
}
public:
VerifyCodeRootOopClosure(const HeapRegion* hr):
VerifyCodeRootOopClosure(const G1HeapRegion* hr):
_hr(hr), _failures(false), _has_oops_in_region(false) {}
void do_oop(narrowOop* p) { do_oop_work(p); }
@ -332,10 +332,10 @@ public:
};
class VerifyCodeRootNMethodClosure: public NMethodClosure {
const HeapRegion* _hr;
const G1HeapRegion* _hr;
bool _failures;
public:
VerifyCodeRootNMethodClosure(const HeapRegion* hr) :
VerifyCodeRootNMethodClosure(const G1HeapRegion* hr) :
_hr(hr), _failures(false) {}
void do_nmethod(nmethod* nm) {
@ -358,7 +358,7 @@ public:
bool failures() { return _failures; }
};
bool HeapRegion::verify_code_roots(VerifyOption vo) const {
bool G1HeapRegion::verify_code_roots(VerifyOption vo) const {
if (!G1VerifyHeapRegionCodeRoots) {
// We're not verifying code roots.
return false;
@ -403,9 +403,9 @@ bool HeapRegion::verify_code_roots(VerifyOption vo) const {
return nm_cl.failures();
}
void HeapRegion::print() const { print_on(tty); }
void G1HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
void G1HeapRegion::print_on(outputStream* st) const {
st->print("|%4u", this->_hrm_index);
st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
p2i(bottom()), p2i(top()), p2i(end()));
@ -519,13 +519,13 @@ class G1VerifyLiveAndRemSetClosure : public BasicOopIterateClosure {
return _failures->record_failure();
}
void print_containing_obj(outputStream* out, HeapRegion* from) {
void print_containing_obj(outputStream* out, G1HeapRegion* from) {
log_error(gc, verify)("Field " PTR_FORMAT " of obj " PTR_FORMAT " in region " HR_FORMAT,
p2i(_p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
print_object(out, _containing_obj);
}
void print_referenced_obj(outputStream* out, HeapRegion* to, const char* explanation) {
void print_referenced_obj(outputStream* out, G1HeapRegion* to, const char* explanation) {
log_error(gc, verify)("points to %sobj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
explanation, p2i(_obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
print_object(out, _obj);
@ -558,13 +558,13 @@ class G1VerifyLiveAndRemSetClosure : public BasicOopIterateClosure {
log.error("----------");
}
HeapRegion* from = this->_g1h->heap_region_containing(this->_p);
G1HeapRegion* from = this->_g1h->heap_region_containing(this->_p);
this->print_containing_obj(&ls, from);
if (!_is_in_heap) {
log.error("points to address " PTR_FORMAT " outside of heap", p2i(this->_obj));
} else {
HeapRegion* to = this->_g1h->heap_region_containing(this->_obj);
G1HeapRegion* to = this->_g1h->heap_region_containing(this->_obj);
this->print_referenced_obj(&ls, to, "dead ");
}
log.error("----------");
@ -575,8 +575,8 @@ class G1VerifyLiveAndRemSetClosure : public BasicOopIterateClosure {
struct RemSetChecker : public Checker<T> {
using CardValue = CardTable::CardValue;
HeapRegion* _from;
HeapRegion* _to;
G1HeapRegion* _from;
G1HeapRegion* _to;
CardValue _cv_obj;
CardValue _cv_field;
@ -658,7 +658,7 @@ public:
virtual inline void do_oop(oop* p) { do_oop_work(p); }
};
bool HeapRegion::verify_liveness_and_remset(VerifyOption vo) const {
bool G1HeapRegion::verify_liveness_and_remset(VerifyOption vo) const {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1VerifyFailureCounter failures;
@ -691,7 +691,7 @@ bool HeapRegion::verify_liveness_and_remset(VerifyOption vo) const {
return failures.count() != 0;
}
bool HeapRegion::verify(VerifyOption vo) const {
bool G1HeapRegion::verify(VerifyOption vo) const {
// We cast p to an oop, so region-bottom must be an obj-start.
assert(!is_humongous() || is_starts_humongous(), "invariant");
@ -710,7 +710,7 @@ bool HeapRegion::verify(VerifyOption vo) const {
return verify_code_roots(vo);
}
void HeapRegion::clear(bool mangle_space) {
void G1HeapRegion::clear(bool mangle_space) {
set_top(bottom());
if (ZapUnusedHeapArea && mangle_space) {
@ -719,12 +719,12 @@ void HeapRegion::clear(bool mangle_space) {
}
#ifndef PRODUCT
void HeapRegion::mangle_unused_area() {
void G1HeapRegion::mangle_unused_area() {
SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif
void HeapRegion::object_iterate(ObjectClosure* blk) {
void G1HeapRegion::object_iterate(ObjectClosure* blk) {
HeapWord* p = bottom();
while (p < top()) {
if (block_is_obj(p, parsable_bottom())) {
@ -734,7 +734,7 @@ void HeapRegion::object_iterate(ObjectClosure* blk) {
}
}
void HeapRegion::fill_with_dummy_object(HeapWord* address, size_t word_size, bool zap) {
void G1HeapRegion::fill_with_dummy_object(HeapWord* address, size_t word_size, bool zap) {
// Keep the BOT in sync for old generation regions.
if (is_old()) {
update_bot_for_block(address, address + word_size);
@ -743,7 +743,7 @@ void HeapRegion::fill_with_dummy_object(HeapWord* address, size_t word_size, boo
CollectedHeap::fill_with_object(address, word_size, zap);
}
void HeapRegion::fill_range_with_dead_objects(HeapWord* start, HeapWord* end) {
void G1HeapRegion::fill_range_with_dead_objects(HeapWord* start, HeapWord* end) {
size_t range_size = pointer_delta(end, start);
// We must be a bit careful with regions that contain pinned objects. While the

Просмотреть файл

@ -41,7 +41,7 @@ class G1CollectedHeap;
class G1CMBitMap;
class G1Predictions;
class HeapRegionRemSet;
class HeapRegion;
class G1HeapRegion;
class HeapRegionSetBase;
class nmethod;
@ -54,7 +54,7 @@ class nmethod;
// sentinel value for hrm_index
#define G1_NO_HRM_INDEX ((uint) -1)
// A HeapRegion is the smallest piece of a G1CollectedHeap that
// A G1HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.
// Each heap region is self contained. top() and end() can never
@ -66,7 +66,7 @@ class nmethod;
// the last will point to their own end. The last ContinuesHumongous
// region may have top() equal the end of object if there isn't
// room for filler objects to pad out to the end of the region.
class HeapRegion : public CHeapObj<mtGC> {
class G1HeapRegion : public CHeapObj<mtGC> {
friend class VMStructs;
HeapWord* const _bottom;
@ -130,10 +130,10 @@ private:
// Try to allocate at least min_word_size and up to desired_size from this region.
// Returns null if not possible, otherwise sets actual_word_size to the amount of
// space allocated.
// This version assumes that all allocation requests to this HeapRegion are properly
// This version assumes that all allocation requests to this G1HeapRegion are properly
// synchronized.
inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
// Try to allocate at least min_word_size and up to desired_size from this HeapRegion.
// Try to allocate at least min_word_size and up to desired_size from this G1HeapRegion.
// Returns null if not possible, otherwise sets actual_word_size to the amount of
// space allocated.
// This version synchronizes with other calls to par_allocate_impl().
@ -177,12 +177,12 @@ public:
// Update skip-compacting heap region to be consistent after Full GC.
void reset_skip_compacting_after_full_gc();
// All allocated blocks are occupied by objects in a HeapRegion.
// All allocated blocks are occupied by objects in a G1HeapRegion.
bool block_is_obj(const HeapWord* p, HeapWord* pb) const;
// Returns the object size for all valid block starts. If parsable_bottom (pb)
// is given, calculates the block size based on that parsable_bottom, not the
// current value of this HeapRegion.
// current value of this G1HeapRegion.
size_t block_size(const HeapWord* p) const;
size_t block_size(const HeapWord* p, HeapWord* pb) const;
@ -205,7 +205,7 @@ private:
HeapRegionType _type;
// For a humongous region, region in which it starts.
HeapRegion* _humongous_start_region;
G1HeapRegion* _humongous_start_region;
static const uint InvalidCSetIndex = UINT_MAX;
@ -214,8 +214,8 @@ private:
uint _index_in_opt_cset;
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
HeapRegion* _prev;
G1HeapRegion* _next;
G1HeapRegion* _prev;
#ifdef ASSERT
HeapRegionSetBase* _containing_set;
#endif // ASSERT
@ -273,7 +273,7 @@ private:
inline HeapWord* next_live_in_unparsable(const HeapWord* p, HeapWord* limit) const;
public:
HeapRegion(uint hrm_index,
G1HeapRegion(uint hrm_index,
G1BlockOffsetTable* bot,
MemRegion mr,
G1CardSetConfiguration* config);
@ -282,7 +282,7 @@ public:
// sequence, otherwise -1.
uint hrm_index() const { return _hrm_index; }
// Initializing the HeapRegion not only resets the data structure, but also
// Initializing the G1HeapRegion not only resets the data structure, but also
// resets the BOT for that heap region.
// The default values for clear_space means that we will do the clearing if
// there's clearing to be done ourselves. We also always mangle the space.
@ -400,7 +400,7 @@ public:
void set_old();
// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
G1HeapRegion* humongous_start_region() const {
return _humongous_start_region;
}
@ -415,7 +415,7 @@ public:
// Makes the current region be a "continues humongous"
// region. first_hr is the "start humongous" region of the series
// which this region will be part of.
void set_continues_humongous(HeapRegion* first_hr);
void set_continues_humongous(G1HeapRegion* first_hr);
// Unsets the humongous-related fields on the region.
void clear_humongous();
@ -434,11 +434,11 @@ public:
// Getter and setter for the next and prev fields used to link regions into
// linked lists.
void set_next(HeapRegion* next) { _next = next; }
HeapRegion* next() { return _next; }
void set_next(G1HeapRegion* next) { _next = next; }
G1HeapRegion* next() { return _next; }
void set_prev(HeapRegion* prev) { _prev = prev; }
HeapRegion* prev() { return _prev; }
void set_prev(G1HeapRegion* prev) { _prev = prev; }
G1HeapRegion* prev() { return _prev; }
void unlink_from_list();
@ -466,8 +466,8 @@ public:
#endif // ASSERT
// Reset the HeapRegion to default values and clear its remembered set.
// If clear_space is true, clear the HeapRegion's memory.
// Reset the G1HeapRegion to default values and clear its remembered set.
// If clear_space is true, clear the G1HeapRegion's memory.
// Callers must ensure this is not called by multiple threads at the same time.
void hr_clear(bool clear_space);
// Clear the card table corresponding to this region.
@ -568,7 +568,7 @@ public:
HeapRegionClosure(): _is_complete(true) {}
// Typically called on each region until it returns true.
virtual bool do_heap_region(HeapRegion* r) = 0;
virtual bool do_heap_region(G1HeapRegion* r) = 0;
// True after iteration if the closure was applied to all heap regions
// and returned "false" in all cases.

Просмотреть файл

@ -42,7 +42,7 @@
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
inline HeapWord* G1HeapRegion::allocate_impl(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
HeapWord* obj = top();
@ -59,7 +59,7 @@ inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
}
}
inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
inline HeapWord* G1HeapRegion::par_allocate_impl(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
do {
@ -83,11 +83,11 @@ inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
} while (true);
}
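par_allocate_impl above is the lock-free variant of bump-pointer allocation: read top, compute the new top, and publish it with a compare-and-swap, retrying if another thread raced ahead. A standalone sketch of that loop using std::atomic, with sizes in words and the min/desired split of the real signature omitted:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct ToyRegion {
  size_t end_words;                  // region capacity in words
  std::atomic<size_t> top_words{0};  // shared allocation cursor
};

// Lock-free bump allocation: returns the start offset of the new block,
// or SIZE_MAX if the request does not fit in the region.
size_t par_allocate(ToyRegion& r, size_t word_size) {
  size_t obs = r.top_words.load(std::memory_order_relaxed);
  do {
    if (obs + word_size > r.end_words) {
      return SIZE_MAX;               // caller would retire the region and take a new one
    }
    // On failure compare_exchange_weak reloads `obs`, so the fit check above
    // is re-evaluated before the next attempt.
  } while (!r.top_words.compare_exchange_weak(obs, obs + word_size));
  return obs;
}

int main() {
  ToyRegion r{1024};
  std::printf("first allocation at offset  %zu\n", par_allocate(r, 100));
  std::printf("second allocation at offset %zu\n", par_allocate(r, 100));
}
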
inline HeapWord* HeapRegion::block_start(const void* addr) const {
inline HeapWord* G1HeapRegion::block_start(const void* addr) const {
return block_start(addr, parsable_bottom_acquire());
}
inline HeapWord* HeapRegion::advance_to_block_containing_addr(const void* addr,
inline HeapWord* G1HeapRegion::advance_to_block_containing_addr(const void* addr,
HeapWord* const pb,
HeapWord* first_block) const {
HeapWord* cur_block = first_block;
@ -104,25 +104,25 @@ inline HeapWord* HeapRegion::advance_to_block_containing_addr(const void* addr,
}
}
inline HeapWord* HeapRegion::block_start(const void* addr, HeapWord* const pb) const {
inline HeapWord* G1HeapRegion::block_start(const void* addr, HeapWord* const pb) const {
assert(addr >= bottom() && addr < top(), "invalid address");
HeapWord* first_block = _bot->block_start_reaching_into_card(addr);
return advance_to_block_containing_addr(addr, pb, first_block);
}
inline bool HeapRegion::is_in_parsable_area(const void* const addr) const {
inline bool G1HeapRegion::is_in_parsable_area(const void* const addr) const {
return is_in_parsable_area(addr, parsable_bottom());
}
inline bool HeapRegion::is_in_parsable_area(const void* const addr, const void* const pb) {
inline bool G1HeapRegion::is_in_parsable_area(const void* const addr, const void* const pb) {
return addr >= pb;
}
inline bool HeapRegion::is_marked_in_bitmap(oop obj) const {
inline bool G1HeapRegion::is_marked_in_bitmap(oop obj) const {
return G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap()->is_marked(obj);
}
inline bool HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb) const {
inline bool G1HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb) const {
assert(p >= bottom() && p < top(), "precondition");
assert(!is_continues_humongous(), "p must point to block-start");
@ -141,24 +141,24 @@ inline bool HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb
return is_marked_in_bitmap(cast_to_oop(p));
}
inline HeapWord* HeapRegion::next_live_in_unparsable(G1CMBitMap* const bitmap, const HeapWord* p, HeapWord* const limit) const {
inline HeapWord* G1HeapRegion::next_live_in_unparsable(G1CMBitMap* const bitmap, const HeapWord* p, HeapWord* const limit) const {
return bitmap->get_next_marked_addr(p, limit);
}
inline HeapWord* HeapRegion::next_live_in_unparsable(const HeapWord* p, HeapWord* const limit) const {
inline HeapWord* G1HeapRegion::next_live_in_unparsable(const HeapWord* p, HeapWord* const limit) const {
G1CMBitMap* bitmap = G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap();
return next_live_in_unparsable(bitmap, p, limit);
}
inline bool HeapRegion::is_collection_set_candidate() const {
inline bool G1HeapRegion::is_collection_set_candidate() const {
return G1CollectedHeap::heap()->is_collection_set_candidate(this);
}
inline size_t HeapRegion::block_size(const HeapWord* p) const {
inline size_t G1HeapRegion::block_size(const HeapWord* p) const {
return block_size(p, parsable_bottom());
}
inline size_t HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) const {
inline size_t G1HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) const {
assert(p < top(), "precondition");
if (!block_is_obj(p, pb)) {
@ -168,26 +168,26 @@ inline size_t HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) cons
return cast_to_oop(p)->size();
}
inline void HeapRegion::prepare_for_full_gc() {
inline void G1HeapRegion::prepare_for_full_gc() {
// After marking and class unloading the heap temporarily contains dead objects
// with unloaded klasses. Moving parsable_bottom makes some (debug) code correctly
// skip dead objects.
_parsable_bottom = top();
}
inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
inline void G1HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
set_top(new_top);
reset_after_full_gc_common();
}
inline void HeapRegion::reset_skip_compacting_after_full_gc() {
inline void G1HeapRegion::reset_skip_compacting_after_full_gc() {
assert(!is_free(), "must be");
reset_after_full_gc_common();
}
inline void HeapRegion::reset_after_full_gc_common() {
inline void G1HeapRegion::reset_after_full_gc_common() {
// After a full gc the mark information in a movable region is invalid. Reset marking
// information.
G1CollectedHeap::heap()->concurrent_mark()->reset_top_at_mark_start(this);
@ -204,7 +204,7 @@ inline void HeapRegion::reset_after_full_gc_common() {
}
template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
inline void G1HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
HeapWord* limit = top();
HeapWord* next_addr = bottom();
@ -224,24 +224,24 @@ inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarke
assert(next_addr == limit, "Should stop the scan at the limit.");
}
inline HeapWord* HeapRegion::par_allocate(size_t min_word_size,
inline HeapWord* G1HeapRegion::par_allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}
inline HeapWord* HeapRegion::allocate(size_t word_size) {
inline HeapWord* G1HeapRegion::allocate(size_t word_size) {
size_t temp;
return allocate(word_size, word_size, &temp);
}
inline HeapWord* HeapRegion::allocate(size_t min_word_size,
inline HeapWord* G1HeapRegion::allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}
inline void HeapRegion::update_bot() {
inline void G1HeapRegion::update_bot() {
HeapWord* next_addr = bottom();
HeapWord* prev_addr;
@ -253,7 +253,7 @@ inline void HeapRegion::update_bot() {
assert(next_addr == top(), "Should stop the scan at the limit.");
}
inline void HeapRegion::update_bot_for_block(HeapWord* start, HeapWord* end) {
inline void G1HeapRegion::update_bot_for_block(HeapWord* start, HeapWord* end) {
assert(is_in(start), "The start address must be in this region: " HR_FORMAT
" start " PTR_FORMAT " end " PTR_FORMAT,
HR_FORMAT_PARAMS(this),
@ -262,20 +262,20 @@ inline void HeapRegion::update_bot_for_block(HeapWord* start, HeapWord* end) {
_bot->update_for_block(start, end);
}
inline HeapWord* HeapRegion::parsable_bottom() const {
inline HeapWord* G1HeapRegion::parsable_bottom() const {
assert(!is_init_completed() || SafepointSynchronize::is_at_safepoint(), "only during initialization or safepoint");
return _parsable_bottom;
}
inline HeapWord* HeapRegion::parsable_bottom_acquire() const {
inline HeapWord* G1HeapRegion::parsable_bottom_acquire() const {
return Atomic::load_acquire(&_parsable_bottom);
}
inline void HeapRegion::reset_parsable_bottom() {
inline void G1HeapRegion::reset_parsable_bottom() {
Atomic::release_store(&_parsable_bottom, bottom());
}
inline void HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes) {
inline void G1HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes) {
assert_at_safepoint();
if (top_at_mark_start != bottom()) {
@ -287,23 +287,23 @@ inline void HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t
}
}
inline void HeapRegion::note_end_of_scrubbing() {
inline void G1HeapRegion::note_end_of_scrubbing() {
reset_parsable_bottom();
}
inline bool HeapRegion::needs_scrubbing() const {
inline bool G1HeapRegion::needs_scrubbing() const {
return is_old();
}
inline bool HeapRegion::in_collection_set() const {
inline bool G1HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
template <class Closure, bool in_gc_pause>
HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
HeapWord* G1HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
Closure* cl) {
assert(is_humongous(), "precondition");
HeapRegion* sr = humongous_start_region();
G1HeapRegion* sr = humongous_start_region();
oop obj = cast_to_oop(sr->bottom());
// If concurrent and klass_or_null is null, then space has been
@ -342,7 +342,7 @@ HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
}
template <class Closure>
inline HeapWord* HeapRegion::oops_on_memregion_iterate_in_unparsable(MemRegion mr, HeapWord* block_start, Closure* cl) {
inline HeapWord* G1HeapRegion::oops_on_memregion_iterate_in_unparsable(MemRegion mr, HeapWord* block_start, Closure* cl) {
HeapWord* const start = mr.start();
HeapWord* const end = mr.end();
@ -387,7 +387,7 @@ inline HeapWord* HeapRegion::oops_on_memregion_iterate_in_unparsable(MemRegion m
// we expect that the amount of GCs executed during scrubbing is very low so such
// tests would be unnecessary almost all the time.
template <class Closure, bool in_gc_pause>
inline HeapWord* HeapRegion::oops_on_memregion_iterate(MemRegion mr, Closure* cl) {
inline HeapWord* G1HeapRegion::oops_on_memregion_iterate(MemRegion mr, Closure* cl) {
// Cache the boundaries of the memory region in some const locals
HeapWord* const start = mr.start();
HeapWord* const end = mr.end();
@ -451,7 +451,7 @@ inline HeapWord* HeapRegion::oops_on_memregion_iterate(MemRegion mr, Closure* cl
}
template <bool in_gc_pause, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
HeapWord* G1HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
Closure* cl) {
assert(MemRegion(bottom(), top()).contains(mr), "Card region not in heap region");
@ -472,26 +472,26 @@ HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
return oops_on_memregion_iterate<Closure, in_gc_pause>(mr, cl);
}
inline uint HeapRegion::age_in_surv_rate_group() const {
inline uint G1HeapRegion::age_in_surv_rate_group() const {
assert(has_surv_rate_group(), "pre-condition");
assert(has_valid_age_in_surv_rate(), "pre-condition");
return _surv_rate_group->age_in_group(_age_index);
}
inline bool HeapRegion::has_valid_age_in_surv_rate() const {
inline bool G1HeapRegion::has_valid_age_in_surv_rate() const {
return _surv_rate_group->is_valid_age_index(_age_index);
}
inline bool HeapRegion::has_surv_rate_group() const {
inline bool G1HeapRegion::has_surv_rate_group() const {
return _surv_rate_group != nullptr;
}
inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) const {
inline double G1HeapRegion::surv_rate_prediction(G1Predictions const& predictor) const {
assert(has_surv_rate_group(), "pre-condition");
return _surv_rate_group->surv_rate_pred(predictor, age_in_surv_rate_group());
}
inline void HeapRegion::install_surv_rate_group(G1SurvRateGroup* surv_rate_group) {
inline void G1HeapRegion::install_surv_rate_group(G1SurvRateGroup* surv_rate_group) {
assert(surv_rate_group != nullptr, "pre-condition");
assert(!has_surv_rate_group(), "pre-condition");
assert(is_young(), "pre-condition");
@ -500,7 +500,7 @@ inline void HeapRegion::install_surv_rate_group(G1SurvRateGroup* surv_rate_group
_age_index = surv_rate_group->next_age_index();
}
inline void HeapRegion::uninstall_surv_rate_group() {
inline void G1HeapRegion::uninstall_surv_rate_group() {
if (has_surv_rate_group()) {
assert(has_valid_age_in_surv_rate(), "pre-condition");
assert(is_young(), "pre-condition");
@ -512,12 +512,12 @@ inline void HeapRegion::uninstall_surv_rate_group() {
}
}
inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
inline void G1HeapRegion::record_surv_words_in_group(size_t words_survived) {
uint age = age_in_surv_rate_group();
_surv_rate_group->record_surviving_words(age, words_survived);
}
inline void HeapRegion::add_pinned_object_count(size_t value) {
inline void G1HeapRegion::add_pinned_object_count(size_t value) {
assert(value != 0, "wasted effort");
assert(!is_free(), "trying to pin free region %u, adding %zu", hrm_index(), value);
Atomic::add(&_pinned_object_count, value, memory_order_relaxed);

Просмотреть файл

@ -121,7 +121,7 @@ public:
// processing on it.
//
// This means that this does NOT completely correspond to the information stored
// in a HeapRegion, but only to what is interesting for the current young collection.
// in a G1HeapRegion, but only to what is interesting for the current young collection.
class G1HeapRegionAttrBiasedMappedArray : public G1BiasedMappedArray<G1HeapRegionAttr> {
protected:
G1HeapRegionAttr default_value() const { return G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet); }
@ -180,10 +180,10 @@ class G1HeapRegionAttrBiasedMappedArray : public G1BiasedMappedArray<G1HeapRegio
bool is_in_cset_or_humongous_candidate(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous_candidate(); }
bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
bool is_in_cset(const HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); }
bool is_in_cset(const G1HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); }
G1HeapRegionAttr at(HeapWord* addr) const { return get_by_address(addr); }
void clear() { G1BiasedMappedArray<G1HeapRegionAttr>::clear(); }
void clear(const HeapRegion* hr) { return set_by_index(hr->hrm_index(), G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet)); }
void clear(const G1HeapRegion* hr) { return set_by_index(hr->hrm_index(), G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet)); }
};
#endif // SHARE_GC_G1_G1HEAPREGIONATTR_HPP

Просмотреть файл

@ -32,7 +32,7 @@
class DumpEventInfoClosure : public HeapRegionClosure {
public:
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
EventG1HeapRegionInformation evt;
evt.set_index(r->hrm_index());
evt.set_type(r->get_trace_type());

Просмотреть файл

@ -58,7 +58,7 @@ public:
guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
}
}
bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
bool is_correct_type(G1HeapRegion* hr) { return hr->is_free(); }
const char* get_description() { return "Free Regions"; }
};
@ -85,13 +85,13 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
_bot_mapper = bot;
_cardtable_mapper = cardtable;
_regions.initialize(heap_storage->reserved(), HeapRegion::GrainBytes);
_regions.initialize(heap_storage->reserved(), G1HeapRegion::GrainBytes);
_committed_map.initialize(reserved_length());
}
HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
HeapRegion* hr = nullptr;
G1HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
G1HeapRegion* hr = nullptr;
bool from_head = !type.is_young();
G1NUMA* numa = G1NUMA::numa();
@ -118,7 +118,7 @@ HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint re
return hr;
}
HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
G1HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
uint candidate = find_contiguous_in_free_list(num_regions);
if (candidate == G1_NO_HRM_INDEX) {
return nullptr;
@ -126,7 +126,7 @@ HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_region
return allocate_free_regions_starting_at(candidate, num_regions);
}
HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
G1HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
uint candidate = find_contiguous_allow_expand(num_regions);
if (candidate == G1_NO_HRM_INDEX) {
return nullptr;
@ -135,7 +135,7 @@ HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions)
return allocate_free_regions_starting_at(candidate, num_regions);
}
HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
G1HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
// Special case a single region to avoid expensive search.
if (num_regions == 1) {
return allocate_free_region(HeapRegionType::Humongous, G1NUMA::AnyNodeIndex);
@ -143,20 +143,20 @@ HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
return allocate_humongous_from_free_list(num_regions);
}
HeapRegion* HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
G1HeapRegion* HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
return allocate_humongous_allow_expand(num_regions);
}
#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
bool HeapRegionManager::is_free(G1HeapRegion* hr) const {
return _free_list.contains(hr);
}
#endif
HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
G1HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
MemRegion mr(bottom, bottom + G1HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
return g1h->new_heap_region(hrm_index, mr);
}
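
new_heap_region() above derives a region's boundaries purely from its index, and the expand() hunk just below fills the region table lazily the first time an index is committed. A small sketch of that index/address arithmetic under the same assumptions (contiguous reserved heap, fixed region size; the Region type and names are invented):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

struct Region {                      // placeholder for G1HeapRegion metadata
  size_t    index;
  uintptr_t bottom;
  uintptr_t end;
};

class RegionTable {
  uintptr_t _heap_base;              // bottom of the reserved heap
  size_t    _region_bytes;           // stand-in for G1HeapRegion::GrainBytes
  std::vector<Region*> _regions;     // created on first commit, then retained

public:
  RegionTable(uintptr_t heap_base, size_t region_bytes, size_t max_regions)
    : _heap_base(heap_base), _region_bytes(region_bytes), _regions(max_regions, nullptr) {}

  // Region i spans [heap_base + i * region_bytes, heap_base + (i + 1) * region_bytes).
  uintptr_t bottom_addr_for_region(size_t i) const { return _heap_base + i * _region_bytes; }
  size_t    addr_to_index(uintptr_t addr) const    { return (addr - _heap_base) / _region_bytes; }

  // Analogue of expand(): make [start, start + n) usable, creating metadata
  // only for slots that have never been committed before.
  void expand(size_t start, size_t n) {
    for (size_t i = start; i < start + n; i++) {
      if (_regions[i] == nullptr) {
        uintptr_t bottom = bottom_addr_for_region(i);
        _regions[i] = new Region{i, bottom, bottom + _region_bytes};
      }
    }
  }

  ~RegionTable() {
    for (Region* r : _regions) delete r;
  }
};
```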
@ -164,7 +164,7 @@ HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
void HeapRegionManager::expand(uint start, uint num_regions, WorkerThreads* pretouch_workers) {
commit_regions(start, num_regions, pretouch_workers);
for (uint i = start; i < start + num_regions; i++) {
HeapRegion* hr = _regions.get_by_index(i);
G1HeapRegion* hr = _regions.get_by_index(i);
if (hr == nullptr) {
hr = new_heap_region(i);
OrderAccess::storestore();
@ -197,7 +197,7 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
if (G1HeapRegionPrinter::is_active()) {
for (uint i = start; i < end; i++) {
// Can't use at() here since region is no longer marked available.
HeapRegion* hr = _regions.get_by_index(i);
G1HeapRegion* hr = _regions.get_by_index(i);
assert(hr != nullptr, "Region should still be present");
G1HeapRegionPrinter::uncommit(hr);
}
@ -218,7 +218,7 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
void HeapRegionManager::initialize_regions(uint start, uint num_regions) {
for (uint i = start; i < start + num_regions; i++) {
assert(is_available(i), "Just made region %u available but is apparently not.", i);
HeapRegion* hr = at(i);
G1HeapRegion* hr = at(i);
hr->initialize();
hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
@ -248,7 +248,7 @@ void HeapRegionManager::deactivate_regions(uint start, uint num_regions) {
// Reset NUMA index to and print state change.
uint end = start + num_regions;
for (uint i = start; i < end; i++) {
HeapRegion* hr = at(i);
G1HeapRegion* hr = at(i);
hr->set_node_index(G1NUMA::UnknownNodeIndex);
G1HeapRegionPrinter::inactive(hr);
}
@ -430,7 +430,7 @@ void HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
// General sanity check, regions found should either be available and empty
// or not available so that we can make them available and use them.
for (uint i = start; i < (start + num_regions); i++) {
HeapRegion* hr = _regions.get_by_index(i);
G1HeapRegion* hr = _regions.get_by_index(i);
assert(!is_available(i) || hr->is_free(),
"Found region sequence starting at " UINT32_FORMAT ", length " UINT32_FORMAT
" that is not free at " UINT32_FORMAT ". Hr is " PTR_FORMAT ", type is %s",
@ -486,11 +486,11 @@ uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
return find_contiguous_in_range(0, reserved_length(), num_regions);
}
HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
G1HeapRegion* HeapRegionManager::next_region_in_heap(const G1HeapRegion* r) const {
guarantee(r != nullptr, "Start region must be a valid region");
guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
HeapRegion* hr = _regions.get_by_index(i);
G1HeapRegion* hr = _regions.get_by_index(i);
if (is_available(i)) {
return hr;
}
@ -505,7 +505,7 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
if (!is_available(i)) {
continue;
}
guarantee(at(i) != nullptr, "Tried to access region %u that has a null HeapRegion*", i);
guarantee(at(i) != nullptr, "Tried to access region %u that has a null G1HeapRegion*", i);
bool res = blk->do_heap_region(at(i));
if (res) {
blk->set_incomplete();
@ -534,7 +534,7 @@ uint HeapRegionManager::find_highest_free(bool* expanded) {
// entry which is either free or not yet committed. If not yet
// committed, expand at that index.
for (uint curr = reserved_length(); curr-- > 0;) {
HeapRegion *hr = _regions.get_by_index(curr);
G1HeapRegion* hr = _regions.get_by_index(curr);
if (hr == nullptr || !is_available(curr)) {
// Found uncommitted and free region, expand to make it available for use.
expand_exact(curr, 1, nullptr);
@ -563,7 +563,7 @@ bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* com
commits++;
expand_exact(curr_index, 1, pretouch_workers);
}
HeapRegion* curr_region = _regions.get_by_index(curr_index);
G1HeapRegion* curr_region = _regions.get_by_index(curr_index);
if (!curr_region->is_free()) {
return false;
}
@ -587,7 +587,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* h
if (!is_available(index)) {
continue;
}
HeapRegion* r = _regions.get_by_index(index);
G1HeapRegion* r = _regions.get_by_index(index);
// We'll ignore regions already claimed.
if (hrclaimer->is_region_claimed(index)) {
continue;
@ -699,7 +699,7 @@ void HeapRegionManager::verify() {
continue;
}
num_committed++;
HeapRegion* hr = _regions.get_by_index(i);
G1HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != nullptr, "invariant: i: %u", i);
guarantee(!prev_committed || hr->bottom() == prev_end,
"invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
@ -800,9 +800,9 @@ public:
return;
}
FreeRegionList *free_list = worker_freelist(worker_id);
FreeRegionList* free_list = worker_freelist(worker_id);
for (uint i = start; i < end; i++) {
HeapRegion *region = _hrm->at_or_null(i);
G1HeapRegion* region = _hrm->at_or_null(i);
if (region != nullptr && region->is_free()) {
// Need to clear old links to allow to be added to new freelist.
region->unlink_from_list();


@ -32,19 +32,19 @@
#include "memory/allocation.hpp"
#include "services/memoryUsage.hpp"
class HeapRegion;
class G1HeapRegion;
class HeapRegionClosure;
class HeapRegionClaimer;
class FreeRegionList;
class WorkerThreads;
class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
class G1HeapRegionTable : public G1BiasedMappedArray<G1HeapRegion*> {
protected:
virtual HeapRegion* default_value() const { return nullptr; }
virtual G1HeapRegion* default_value() const { return nullptr; }
};
// This class keeps track of the actual heap memory, auxiliary data
// and its metadata (i.e., HeapRegion instances) and the list of free regions.
// and its metadata (i.e., G1HeapRegion instances) and the list of free regions.
//
// This allows maximum flexibility for deciding what to commit or uncommit given
// a request from outside.
@ -55,9 +55,9 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
// regions that are consecutive in the array should also be adjacent in the
// address space (i.e., region(i).end() == region(i+1).bottom()).
//
// We create a HeapRegion when we commit the region's address space
// We create a G1HeapRegion when we commit the region's address space
// for the first time. When we uncommit the address space of a
// region we retain the HeapRegion to be able to re-use it in the
// region we retain the G1HeapRegion to be able to re-use it in the
// future (in case we recommit it).
//
// We keep track of four lengths:
@ -81,7 +81,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// can either be active (ready for use) or inactive (ready for uncommit).
G1CommittedRegionMap _committed_map;
// Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
// Internal only. The highest heap region +1 we allocated a G1HeapRegion instance for.
uint _allocated_heapregions_length;
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
@ -137,12 +137,12 @@ class HeapRegionManager: public CHeapObj<mtGC> {
void reactivate_regions(uint start, uint num_regions);
void uncommit_regions(uint start, uint num_regions);
// Allocate a new HeapRegion for the given index.
HeapRegion* new_heap_region(uint hrm_index);
// Allocate a new G1HeapRegion for the given index.
G1HeapRegion* new_heap_region(uint hrm_index);
// Humongous allocation helpers
HeapRegion* allocate_humongous_from_free_list(uint num_regions);
HeapRegion* allocate_humongous_allow_expand(uint num_regions);
G1HeapRegion* allocate_humongous_from_free_list(uint num_regions);
G1HeapRegion* allocate_humongous_allow_expand(uint num_regions);
// Expand helper for cases when the regions to expand are well defined.
void expand_exact(uint start, uint num_regions, WorkerThreads* pretouch_workers);
@ -153,7 +153,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
#ifdef ASSERT
public:
bool is_free(HeapRegion* hr) const;
bool is_free(G1HeapRegion* hr) const;
#endif
public:
// Empty constructor, we'll initialize it with the initialize() method.
@ -165,32 +165,32 @@ public:
G1RegionToSpaceMapper* cardtable);
// Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
// new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
// new G1HeapRegion that owns G1HeapRegion at index 0. Since at the moment we commit
// the heap from the lowest address, this region (and its associated data
// structures) are available and we do not need to check further.
HeapRegion* get_dummy_region() { return new_heap_region(0); }
G1HeapRegion* get_dummy_region() { return new_heap_region(0); }
// Return the HeapRegion at the given index. Assume that the index
// Return the G1HeapRegion at the given index. Assume that the index
// is valid.
inline HeapRegion* at(uint index) const;
inline G1HeapRegion* at(uint index) const;
// Return the HeapRegion at the given index, null if the index
// Return the G1HeapRegion at the given index, null if the index
// is for an unavailable region.
inline HeapRegion* at_or_null(uint index) const;
inline G1HeapRegion* at_or_null(uint index) const;
// Returns whether the given region is available for allocation.
inline bool is_available(uint region) const;
// Return the next region (by index) that is part of the same
// humongous object that hr is part of.
inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
inline G1HeapRegion* next_region_in_humongous(G1HeapRegion* hr) const;
// If addr is within the committed space return its corresponding
// HeapRegion, otherwise return null.
inline HeapRegion* addr_to_region(HeapWord* addr) const;
// G1HeapRegion, otherwise return null.
inline G1HeapRegion* addr_to_region(HeapWord* addr) const;
// Insert the given region into the free region list.
inline void insert_into_free_list(HeapRegion* hr);
inline void insert_into_free_list(G1HeapRegion* hr);
// Rebuild the free region list from scratch.
void rebuild_free_list(WorkerThreads* workers);
@ -201,15 +201,15 @@ public:
}
// Allocate a free region with specific node index. If fails allocate with next node index.
HeapRegion* allocate_free_region(HeapRegionType type, uint requested_node_index);
G1HeapRegion* allocate_free_region(HeapRegionType type, uint requested_node_index);
// Allocate a humongous object from the free list
HeapRegion* allocate_humongous(uint num_regions);
G1HeapRegion* allocate_humongous(uint num_regions);
// Allocate a humongous object by expanding the heap
HeapRegion* expand_and_allocate_humongous(uint num_regions);
G1HeapRegion* expand_and_allocate_humongous(uint num_regions);
inline HeapRegion* allocate_free_regions_starting_at(uint first, uint num_regions);
inline G1HeapRegion* allocate_free_regions_starting_at(uint first, uint num_regions);
// Remove all regions from the free list.
void remove_all_free_regions() {
@ -226,7 +226,7 @@ public:
}
size_t total_free_bytes() const {
return num_free_regions() * HeapRegion::GrainBytes;
return num_free_regions() * G1HeapRegion::GrainBytes;
}
// Return the number of available (uncommitted) regions.
@ -247,14 +247,14 @@ public:
// Expand the sequence to reflect that the heap has grown. Either create new
// HeapRegions, or re-use existing ones. Returns the number of regions the
// sequence was expanded by. If a HeapRegion allocation fails, the resulting
// sequence was expanded by. If a G1HeapRegion allocation fails, the resulting
// number of regions might be smaller than what's desired.
uint expand_by(uint num_regions, WorkerThreads* pretouch_workers);
// Try to expand on the given node index, returning the index of the new region.
uint expand_on_preferred_node(uint node_index);
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
G1HeapRegion* next_region_in_heap(const G1HeapRegion* r) const;
// Find the highest free or uncommitted region in the reserved heap,
// and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.


@ -35,7 +35,7 @@ inline bool HeapRegionManager::is_available(uint region) const {
return _committed_map.active(region);
}
inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
inline G1HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
assert(addr < heap_end(),
"addr: " PTR_FORMAT " end: " PTR_FORMAT, p2i(addr), p2i(heap_end()));
assert(addr >= heap_bottom(),
@ -43,25 +43,25 @@ inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
return _regions.get_by_address(addr);
}
inline HeapRegion* HeapRegionManager::at(uint index) const {
inline G1HeapRegion* HeapRegionManager::at(uint index) const {
assert(is_available(index), "pre-condition");
HeapRegion* hr = _regions.get_by_index(index);
G1HeapRegion* hr = _regions.get_by_index(index);
assert(hr != nullptr, "sanity");
assert(hr->hrm_index() == index, "sanity");
return hr;
}
inline HeapRegion* HeapRegionManager::at_or_null(uint index) const {
inline G1HeapRegion* HeapRegionManager::at_or_null(uint index) const {
if (!is_available(index)) {
return nullptr;
}
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != nullptr, "All available regions must have a HeapRegion but index %u has not.", index);
G1HeapRegion* hr = _regions.get_by_index(index);
assert(hr != nullptr, "All available regions must have a G1HeapRegion but index %u has not.", index);
assert(hr->hrm_index() == index, "sanity");
return hr;
}
inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) const {
inline G1HeapRegion* HeapRegionManager::next_region_in_humongous(G1HeapRegion* hr) const {
uint index = hr->hrm_index();
assert(is_available(index), "pre-condition");
assert(hr->is_humongous(), "next_region_in_humongous should only be called for a humongous region.");
@ -73,12 +73,12 @@ inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) c
}
}
inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
inline void HeapRegionManager::insert_into_free_list(G1HeapRegion* hr) {
_free_list.add_ordered(hr);
}
inline HeapRegion* HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
HeapRegion* start = at(first);
inline G1HeapRegion* HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
G1HeapRegion* start = at(first);
_free_list.remove_starting_at(start, num_regions);
return start;
}


@ -34,7 +34,7 @@ class FreeRegionList;
class G1HeapRegionPrinter : public AllStatic {
// Print an action event.
static void print(const char* action, HeapRegion* hr) {
static void print(const char* action, G1HeapRegion* hr) {
log_trace(gc, region)("G1HR %s(%s) [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT "]",
action, hr->get_type_str(), p2i(hr->bottom()), p2i(hr->top()), p2i(hr->end()));
}
@ -47,31 +47,31 @@ public:
// The methods below are convenient wrappers for the print() method.
static void alloc(HeapRegion* hr) { print("ALLOC", hr); }
static void alloc(G1HeapRegion* hr) { print("ALLOC", hr); }
static void retire(HeapRegion* hr) { print("RETIRE", hr); }
static void retire(G1HeapRegion* hr) { print("RETIRE", hr); }
static void reuse(HeapRegion* hr) { print("REUSE", hr); }
static void reuse(G1HeapRegion* hr) { print("REUSE", hr); }
static void cset(HeapRegion* hr) { print("CSET", hr); }
static void cset(G1HeapRegion* hr) { print("CSET", hr); }
static void evac_failure(HeapRegion* hr) { print("EVAC-FAILURE", hr); }
static void evac_failure(G1HeapRegion* hr) { print("EVAC-FAILURE", hr); }
static void mark_reclaim(HeapRegion* hr) { print("MARK-RECLAIM", hr); }
static void mark_reclaim(G1HeapRegion* hr) { print("MARK-RECLAIM", hr); }
static void eager_reclaim(HeapRegion* hr) { print("EAGER-RECLAIM", hr); }
static void eager_reclaim(G1HeapRegion* hr) { print("EAGER-RECLAIM", hr); }
static void evac_reclaim(HeapRegion* hr) { print("EVAC-RECLAIM", hr); }
static void evac_reclaim(G1HeapRegion* hr) { print("EVAC-RECLAIM", hr); }
static void post_compaction(HeapRegion* hr) { print("POST-COMPACTION", hr); }
static void post_compaction(G1HeapRegion* hr) { print("POST-COMPACTION", hr); }
static void commit(HeapRegion* hr) { print("COMMIT", hr); }
static void commit(G1HeapRegion* hr) { print("COMMIT", hr); }
static void active(HeapRegion* hr) { print("ACTIVE", hr); }
static void active(G1HeapRegion* hr) { print("ACTIVE", hr); }
static void inactive(HeapRegion* hr) { print("INACTIVE", hr); }
static void inactive(G1HeapRegion* hr) { print("INACTIVE", hr); }
static void uncommit(HeapRegion* hr) { print("UNCOMMIT", hr); }
static void uncommit(G1HeapRegion* hr) { print("UNCOMMIT", hr); }
};
#endif // SHARE_GC_G1_G1HEAPREGIONPRINTER_HPP


@ -55,7 +55,7 @@ void HeapRegionRemSet::initialize(MemRegion reserved) {
_heap_base_address = reserved.start();
}
HeapRegionRemSet::HeapRegionRemSet(HeapRegion* hr,
HeapRegionRemSet::HeapRegionRemSet(G1HeapRegion* hr,
G1CardSetConfiguration* config) :
_code_roots(),
_card_set_mm(config, G1CollectedHeap::heap()->card_set_freelist_pool()),
@ -123,7 +123,7 @@ void HeapRegionRemSet::code_roots_do(NMethodClosure* blk) const {
_code_roots.nmethods_do(blk);
}
void HeapRegionRemSet::clean_code_roots(HeapRegion* hr) {
void HeapRegionRemSet::clean_code_roots(G1HeapRegion* hr) {
_code_roots.clean(hr);
}


@ -49,7 +49,7 @@ class HeapRegionRemSet : public CHeapObj<mtGC> {
// The set of cards in the Java heap
G1CardSet _card_set;
HeapRegion* _hr;
G1HeapRegion* _hr;
// Cached value of heap base address.
static HeapWord* _heap_base_address;
@ -57,7 +57,7 @@ class HeapRegionRemSet : public CHeapObj<mtGC> {
void clear_fcc();
public:
HeapRegionRemSet(HeapRegion* hr, G1CardSetConfiguration* config);
HeapRegionRemSet(G1HeapRegion* hr, G1CardSetConfiguration* config);
bool cardset_is_empty() const {
return _card_set.is_empty();
@ -155,7 +155,7 @@ public:
// Applies blk->do_nmethod() to each of the entries in _code_roots
void code_roots_do(NMethodClosure* blk) const;
// Clean out code roots not having an oop pointing into this region any more.
void clean_code_roots(HeapRegion* hr);
void clean_code_roots(G1HeapRegion* hr);
// Returns the number of elements in _code_roots
size_t code_roots_list_length() const {


@ -31,7 +31,7 @@
uint FreeRegionList::_unrealistically_long_length = 0;
#ifndef PRODUCT
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
void HeapRegionSetBase::verify_region(G1HeapRegion* hr) {
assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index());
assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions
assert(_checker == nullptr || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s",
@ -99,11 +99,11 @@ void FreeRegionList::remove_all() {
check_mt_safety();
verify_optional();
HeapRegion* curr = _head;
G1HeapRegion* curr = _head;
while (curr != nullptr) {
verify_region(curr);
HeapRegion* next = curr->next();
G1HeapRegion* next = curr->next();
curr->set_next(nullptr);
curr->set_prev(nullptr);
curr->set_containing_set(nullptr);
@ -134,7 +134,7 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
#ifdef ASSERT
FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
G1HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
// from null to non-null or vice versa to catch bugs. So, we have
// to null it first before setting it to the value.
@ -189,8 +189,8 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
_head = from_list->_head;
_tail = from_list->_tail;
} else {
HeapRegion* curr_to = _head;
HeapRegion* curr_from = from_list->_head;
G1HeapRegion* curr_to = _head;
G1HeapRegion* curr_from = from_list->_head;
while (curr_from != nullptr) {
while (curr_to != nullptr && curr_to->hrm_index() < curr_from->hrm_index()) {
@ -203,7 +203,7 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
curr_from->set_prev(_tail);
curr_from = nullptr;
} else {
HeapRegion* next_from = curr_from->next();
G1HeapRegion* next_from = curr_from->next();
curr_from->set_next(curr_to);
curr_from->set_prev(curr_to->prev());
@ -227,7 +227,7 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
}
#ifdef ASSERT
void FreeRegionList::verify_region_to_remove(HeapRegion* curr, HeapRegion* next) {
void FreeRegionList::verify_region_to_remove(G1HeapRegion* curr, G1HeapRegion* next) {
assert_free_region_list(_head != next, "invariant");
if (next != nullptr) {
assert_free_region_list(next->prev() == curr, "invariant");
@ -235,7 +235,7 @@ void FreeRegionList::verify_region_to_remove(HeapRegion* curr, HeapRegion* next)
} else {
assert_free_region_list(_tail == curr, "invariant");
}
HeapRegion* prev = curr->prev();
G1HeapRegion* prev = curr->prev();
if (prev == nullptr) {
assert_free_region_list(_head == curr, "invariant");
} else {
@ -244,7 +244,7 @@ void FreeRegionList::verify_region_to_remove(HeapRegion* curr, HeapRegion* next)
}
#endif
void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
void FreeRegionList::remove_starting_at(G1HeapRegion* first, uint num_regions) {
check_mt_safety();
assert_free_region_list(num_regions >= 1, "pre-condition");
assert_free_region_list(!is_empty(), "pre-condition");
@ -254,13 +254,13 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
DEBUG_ONLY(uint old_length = length();)
// prev points to the node right before first or null when first == _head
HeapRegion* const prev = first->prev();
G1HeapRegion* const prev = first->prev();
// next points to the node right after first or null when first == _tail,
// and after the while loop below, next should point to the next node right
// after the removed sublist, or null if the sublist contains _tail.
HeapRegion* next = first->next();
G1HeapRegion* next = first->next();
HeapRegion* curr = first;
G1HeapRegion* curr = first;
uint count = 0;
while (count < num_regions) {
verify_region(curr);
@ -329,9 +329,9 @@ void FreeRegionList::clear() {
}
void FreeRegionList::verify_list() {
HeapRegion* curr = _head;
HeapRegion* prev1 = nullptr;
HeapRegion* prev0 = nullptr;
G1HeapRegion* curr = _head;
G1HeapRegion* prev1 = nullptr;
G1HeapRegion* prev0 = nullptr;
uint count = 0;
size_t capacity = 0;
uint last_index = 0;
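
The list code above keeps free regions in a doubly linked list ordered by region index, and the inline variant further down uses the last insertion point as a search hint. A self-contained sketch of that insertion strategy (toy Node type; no MT-safety checks or per-node NUMA length accounting):

```cpp
#include <cstddef>

struct Node {                 // toy stand-in for a free G1HeapRegion
  size_t index;
  Node* prev = nullptr;
  Node* next = nullptr;
};

class OrderedFreeList {
  Node* _head = nullptr;
  Node* _tail = nullptr;
  Node* _last = nullptr;      // hint: where the previous insertion happened

public:
  // Insert keeping the list sorted by index; start from the hint when the
  // new node belongs after it, which makes ascending insertions cheap.
  void add_ordered(Node* n) {
    Node* curr = (_last != nullptr && _last->index < n->index) ? _last : _head;
    while (curr != nullptr && curr->index < n->index) {
      curr = curr->next;
    }
    n->next = curr;
    n->prev = (curr != nullptr) ? curr->prev : _tail;
    if (n->prev != nullptr) { n->prev->next = n; } else { _head = n; }
    if (curr != nullptr)    { curr->prev    = n; } else { _tail = n; }
    _last = n;
  }

  // Unlink and return the lowest-indexed free node, or nullptr if empty.
  Node* remove_from_head() {
    Node* r = _head;
    if (r == nullptr) { return nullptr; }
    _head = r->next;
    if (_head != nullptr) { _head->prev = nullptr; } else { _tail = nullptr; }
    if (_last == r) { _last = nullptr; }
    r->next = r->prev = nullptr;
    return r;
  }
};
```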


@ -53,8 +53,8 @@ class HeapRegionSetChecker : public CHeapObj<mtGC> {
public:
// Verify MT safety for this HeapRegionSet.
virtual void check_mt_safety() = 0;
// Returns true if the given HeapRegion is of the correct type for this HeapRegionSet.
virtual bool is_correct_type(HeapRegion* hr) = 0;
// Returns true if the given G1HeapRegion is of the correct type for this HeapRegionSet.
virtual bool is_correct_type(G1HeapRegion* hr) = 0;
// Return a description of the type of regions this HeapRegionSet contains.
virtual const char* get_description() = 0;
};
@ -79,7 +79,7 @@ protected:
// verify_region() is used to ensure that the contents of a region
// added to / removed from a set are consistent.
void verify_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_region(G1HeapRegion* hr) PRODUCT_RETURN;
void check_mt_safety() {
if (_checker != nullptr) {
@ -98,15 +98,15 @@ public:
// It updates the fields of the set to reflect hr being added to
// the set and tags the region appropriately.
inline void add(HeapRegion* hr);
inline void add(G1HeapRegion* hr);
// It updates the fields of the set to reflect hr being removed
// from the set and tags the region appropriately.
inline void remove(HeapRegion* hr);
inline void remove(G1HeapRegion* hr);
virtual void verify();
void verify_start();
void verify_next_region(HeapRegion* hr);
void verify_next_region(G1HeapRegion* hr);
void verify_end();
void verify_optional() { DEBUG_ONLY(verify();) }
@ -163,19 +163,19 @@ private:
void add(NodeInfo* info);
};
HeapRegion* _head;
HeapRegion* _tail;
G1HeapRegion* _head;
G1HeapRegion* _tail;
// _last is used to keep track of where we added an element the last
// time. It helps to improve performance when adding several ordered items in a row.
HeapRegion* _last;
G1HeapRegion* _last;
NodeInfo* _node_info;
static uint _unrealistically_long_length;
inline HeapRegion* remove_from_head_impl();
inline HeapRegion* remove_from_tail_impl();
inline G1HeapRegion* remove_from_head_impl();
inline G1HeapRegion* remove_from_tail_impl();
inline void increase_length(uint node_index);
inline void decrease_length(uint node_index);
@ -184,7 +184,7 @@ private:
void add_list_common_start(FreeRegionList* from_list);
void add_list_common_end(FreeRegionList* from_list);
void verify_region_to_remove(HeapRegion* curr, HeapRegion* next) NOT_DEBUG_RETURN;
void verify_region_to_remove(G1HeapRegion* curr, G1HeapRegion* next) NOT_DEBUG_RETURN;
protected:
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
@ -196,7 +196,7 @@ public:
void verify_list();
#ifdef ASSERT
bool contains(HeapRegion* hr) const {
bool contains(G1HeapRegion* hr) const {
return hr->containing_set() == this;
}
#endif
@ -206,14 +206,14 @@ public:
// Add hr to the list. The region should not be a member of another set.
// Assumes that the list is ordered and will preserve that order. The order
// is determined by hrm_index.
inline void add_ordered(HeapRegion* hr);
inline void add_ordered(G1HeapRegion* hr);
// Same restrictions as above, but adds the region last in the list.
inline void add_to_tail(HeapRegion* region_to_add);
inline void add_to_tail(G1HeapRegion* region_to_add);
// Removes from head or tail based on the given argument.
HeapRegion* remove_region(bool from_head);
G1HeapRegion* remove_region(bool from_head);
HeapRegion* remove_region_with_node_index(bool from_head,
G1HeapRegion* remove_region_with_node_index(bool from_head,
uint requested_node_index);
// Merge two ordered lists. The result is also ordered. The order is
@ -231,7 +231,7 @@ public:
// Remove all (contiguous) regions from first to first + num_regions -1 from
// this list.
// Num_regions must be >= 1.
void remove_starting_at(HeapRegion* first, uint num_regions);
void remove_starting_at(G1HeapRegion* first, uint num_regions);
virtual void verify();
@ -245,21 +245,21 @@ public:
class FreeRegionListIterator : public StackObj {
private:
FreeRegionList* _list;
HeapRegion* _curr;
G1HeapRegion* _curr;
public:
bool more_available() {
return _curr != nullptr;
}
HeapRegion* get_next() {
G1HeapRegion* get_next() {
assert(more_available(),
"get_next() should be called when more regions are available");
// If we are going to introduce a count in the iterator we should
// do the "cycle" check.
HeapRegion* hr = _curr;
G1HeapRegion* hr = _curr;
_list->verify_region(hr);
_curr = hr->next();
return hr;
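
The iterator above exposes a more_available()/get_next() protocol rather than STL iterators. A toy, self-contained example of the same calling pattern (ToyRegion and the list contents are invented; only the protocol shape matches):

```cpp
#include <cstdio>

struct ToyRegion { unsigned index; ToyRegion* next; };

class ToyListIterator {
  ToyRegion* _curr;
public:
  explicit ToyListIterator(ToyRegion* head) : _curr(head) {}
  bool more_available() const { return _curr != nullptr; }
  ToyRegion* get_next() { ToyRegion* r = _curr; _curr = r->next; return r; }
};

int main() {
  ToyRegion c{2, nullptr}, b{1, &c}, a{0, &b};   // a -> b -> c
  for (ToyListIterator it(&a); it.more_available(); ) {
    std::printf("region %u\n", it.get_next()->index);
  }
  return 0;
}
```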


@ -29,7 +29,7 @@
#include "gc/g1/g1NUMA.hpp"
inline void HeapRegionSetBase::add(HeapRegion* hr) {
inline void HeapRegionSetBase::add(G1HeapRegion* hr) {
check_mt_safety();
assert_heap_region_set(hr->containing_set() == nullptr, "should not already have a containing set");
assert_heap_region_set(hr->next() == nullptr, "should not already be linked");
@ -40,7 +40,7 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) {
verify_region(hr);
}
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
inline void HeapRegionSetBase::remove(G1HeapRegion* hr) {
check_mt_safety();
verify_region(hr);
assert_heap_region_set(hr->next() == nullptr, "should already be unlinked");
@ -51,7 +51,7 @@ inline void HeapRegionSetBase::remove(HeapRegion* hr) {
_length--;
}
inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) {
inline void FreeRegionList::add_to_tail(G1HeapRegion* region_to_add) {
assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) ||
(length() > 0 && _head != nullptr && _tail != nullptr && _tail->hrm_index() < region_to_add->hrm_index()),
"invariant");
@ -71,7 +71,7 @@ inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) {
increase_length(region_to_add->node_index());
}
inline void FreeRegionList::add_ordered(HeapRegion* hr) {
inline void FreeRegionList::add_ordered(G1HeapRegion* hr) {
assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) ||
(length() > 0 && _head != nullptr && _tail != nullptr),
"invariant");
@ -80,7 +80,7 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) {
// Now link the region
if (_head != nullptr) {
HeapRegion* curr;
G1HeapRegion* curr;
if (_last != nullptr && _last->hrm_index() < hr->hrm_index()) {
curr = _last;
@ -120,8 +120,8 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) {
increase_length(hr->node_index());
}
inline HeapRegion* FreeRegionList::remove_from_head_impl() {
HeapRegion* result = _head;
inline G1HeapRegion* FreeRegionList::remove_from_head_impl() {
G1HeapRegion* result = _head;
_head = result->next();
if (_head == nullptr) {
_tail = nullptr;
@ -132,8 +132,8 @@ inline HeapRegion* FreeRegionList::remove_from_head_impl() {
return result;
}
inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
HeapRegion* result = _tail;
inline G1HeapRegion* FreeRegionList::remove_from_tail_impl() {
G1HeapRegion* result = _tail;
_tail = result->prev();
if (_tail == nullptr) {
@ -145,7 +145,7 @@ inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
return result;
}
inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
inline G1HeapRegion* FreeRegionList::remove_region(bool from_head) {
check_mt_safety();
verify_optional();
@ -154,7 +154,7 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
}
assert_free_region_list(length() > 0 && _head != nullptr && _tail != nullptr, "invariant");
HeapRegion* hr;
G1HeapRegion* hr;
if (from_head) {
hr = remove_from_head_impl();
@ -174,12 +174,12 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
return hr;
}
inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
inline G1HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
uint requested_node_index) {
assert(UseNUMA, "Invariant");
const uint max_search_depth = G1NUMA::numa()->max_search_depth();
HeapRegion* cur;
G1HeapRegion* cur;
// Find the region to use, searching from _head or _tail as requested.
size_t cur_depth = 0;
@ -207,8 +207,8 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
}
// Splice the region out of the list.
HeapRegion* prev = cur->prev();
HeapRegion* next = cur->next();
G1HeapRegion* prev = cur->prev();
G1HeapRegion* next = cur->next();
if (prev == nullptr) {
_head = next;
} else {


@ -123,7 +123,7 @@ size_t G1HeapSizingPolicy::young_collection_expansion_amount() {
bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (long_term_pause_time_ratio > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t min_expand_bytes = G1HeapRegion::GrainBytes;
size_t reserved_bytes = _g1h->max_capacity();
size_t committed_bytes = _g1h->capacity();
size_t uncommitted_bytes = reserved_bytes - committed_bytes;
@ -224,7 +224,7 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) {
// GC where eden is empty. During Remark there can be an
// arbitrary number of eden regions which would skew the
// results.
_g1h->eden_regions_count() * HeapRegion::GrainBytes;
_g1h->eden_regions_count() * G1HeapRegion::GrainBytes;
size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio);
size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);


@ -82,7 +82,7 @@ struct G1HeapTransition::DetailedUsage : public StackObj {
class G1HeapTransition::DetailedUsageClosure: public HeapRegionClosure {
public:
DetailedUsage _usage;
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
if (r->is_old()) {
_usage._old_used += r->used();
_usage._old_region_count++;
@ -157,17 +157,17 @@ void G1HeapTransition::print() {
log_regions("Survivor", _before._survivor_length, after._survivor_length, survivor_capacity_length_before_gc,
_before._survivor_length_per_node, after._survivor_length_per_node);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);
usage._survivor_used / K, ((after._survivor_length * G1HeapRegion::GrainBytes) - usage._survivor_used) / K);
log_info(gc, heap)("Old regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._old_length, after._old_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);
usage._old_used / K, ((after._old_length * G1HeapRegion::GrainBytes) - usage._old_used) / K);
log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._humongous_length, after._humongous_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);
usage._humongous_used / K, ((after._humongous_length * G1HeapRegion::GrainBytes) - usage._humongous_used) / K);
MetaspaceUtils::print_metaspace_change(_before._meta_sizes);
}


@ -110,7 +110,7 @@ class G1VerifyCodeRootOopClosure: public OopClosure {
oop obj = CompressedOops::decode_not_null(heap_oop);
// Now fetch the region containing the object
HeapRegion* hr = _g1h->heap_region_containing(obj);
G1HeapRegion* hr = _g1h->heap_region_containing(obj);
HeapRegionRemSet* hrrs = hr->rem_set();
// Verify that the code root list for this region
// contains the nmethod
@ -198,11 +198,11 @@ class VerifyObjsInRegionClosure: public ObjectClosure {
private:
G1CollectedHeap* _g1h;
size_t _live_bytes;
HeapRegion *_hr;
G1HeapRegion* _hr;
VerifyOption _vo;
public:
VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
VerifyObjsInRegionClosure(G1HeapRegion* hr, VerifyOption vo)
: _live_bytes(0), _hr(hr), _vo(vo) {
_g1h = G1CollectedHeap::heap();
}
@ -245,7 +245,7 @@ public:
return _failures;
}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
// Humongous and old regions might be of any state, so can't check here.
@ -394,7 +394,7 @@ public:
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
if (hr->is_young()) {
// TODO
} else if (hr->is_humongous()) {
@ -452,7 +452,7 @@ class G1VerifyRegionMarkingStateClosure : public HeapRegionClosure {
};
public:
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
if (r->is_free()) {
return false;
}
@ -541,7 +541,7 @@ void G1HeapVerifier::verify_bitmap_clear(bool from_tams) {
public:
G1VerifyBitmapClear(bool from_tams) : _from_tams(from_tams) { }
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
G1CMBitMap* bitmap = cm->mark_bitmap();
@ -562,7 +562,7 @@ class G1VerifyCardTableCleanup: public HeapRegionClosure {
public:
G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
: _verifier(verifier) { }
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);
} else {
@ -579,14 +579,14 @@ void G1HeapVerifier::verify_card_table_cleanup() {
}
}
void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
void G1HeapVerifier::verify_not_dirty_region(G1HeapRegion* hr) {
// All of the region should be clean.
G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->end());
ct->verify_not_dirty_region(mr);
}
void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
void G1HeapVerifier::verify_dirty_region(G1HeapRegion* hr) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
@ -608,7 +608,7 @@ private:
G1HeapVerifier* _verifier;
public:
G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
_verifier->verify_dirty_region(r);
return false;
}
@ -626,7 +626,7 @@ private:
public:
G1CheckRegionAttrTableClosure() : HeapRegionClosure(), _failures(false) { }
virtual bool do_heap_region(HeapRegion* hr) {
virtual bool do_heap_region(G1HeapRegion* hr) {
uint i = hr->hrm_index();
G1HeapRegionAttr region_attr = (G1HeapRegionAttr) G1CollectedHeap::heap()->_region_attr.get_by_index(i);
if (hr->is_humongous()) {


@ -80,8 +80,8 @@ public:
void verify_card_table_cleanup() PRODUCT_RETURN;
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_not_dirty_region(G1HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(G1HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
};


@ -253,8 +253,8 @@ void G1MonitoringSupport::recalculate_sizes() {
uint eden_list_max_length = young_list_target_length - survivor_list_length;
// First calculate the committed sizes that can be calculated independently.
_survivor_space_committed = survivor_list_length * HeapRegion::GrainBytes;
_old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);
_survivor_space_committed = survivor_list_length * G1HeapRegion::GrainBytes;
_old_gen_committed = G1HeapRegion::align_up_to_region_byte_size(_old_gen_used);
// Next, start with the overall committed size.
_overall_committed = _g1h->capacity();
@ -266,7 +266,7 @@ void G1MonitoringSupport::recalculate_sizes() {
committed -= _survivor_space_committed + _old_gen_committed;
// Next, calculate and remove the committed size for the eden.
_eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
_eden_space_committed = (size_t) eden_list_max_length * G1HeapRegion::GrainBytes;
// Somewhat defensive: be robust in case there are inaccuracies in
// the calculations
_eden_space_committed = MIN2(_eden_space_committed, committed);
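
recalculate_sizes() above derives the survivor and old committed sizes first and gives eden whatever committed space remains, clamped defensively. A worked example with invented numbers (4 MB regions, 5 survivor regions, 150 MB of old-gen used, 400 MB committed) showing the same arithmetic:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  const size_t region_bytes = 4 * M;          // stand-in for G1HeapRegion::GrainBytes

  size_t survivor_regions  = 5;
  size_t eden_max_regions  = 50;
  size_t old_gen_used      = 150 * M;
  size_t overall_committed = 400 * M;         // current heap capacity

  size_t survivor_committed = survivor_regions * region_bytes;   // 20 MB
  // Align old-gen used up to a whole number of regions: 150 MB -> 152 MB.
  size_t old_committed = ((old_gen_used + region_bytes - 1) / region_bytes) * region_bytes;

  size_t remaining = overall_committed - survivor_committed - old_committed;
  size_t eden_committed = std::min(eden_max_regions * region_bytes, remaining);

  std::printf("survivor %zu MB, old %zu MB, eden %zu MB\n",
              survivor_committed / M, old_committed / M, eden_committed / M);
  return 0;
}
```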


@ -40,7 +40,7 @@ void G1NMethodClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
T oop_or_narrowoop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(oop_or_narrowoop)) {
oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing(o);
G1HeapRegion* hr = _g1h->heap_region_containing(o);
assert(!_g1h->is_in_cset(o) || hr->rem_set()->code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
hr->add_code_root(_nm);
}


@ -173,7 +173,7 @@ uint G1NUMA::index_of_address(HeapWord *address) const {
}
}
uint G1NUMA::index_for_region(HeapRegion* hr) const {
uint G1NUMA::index_for_region(G1HeapRegion* hr) const {
if (!is_enabled()) {
return 0;
}
@ -194,15 +194,15 @@ uint G1NUMA::index_for_region(HeapRegion* hr) const {
// nodes. Which node to request for a given address is given by the
// region size and the page size. Below are two examples on a 4 NUMA node system:
// 1. G1HeapRegionSize(_region_size) is larger than or equal to page size.
// * Page #: |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
// * HeapRegion #: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
// * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
// * Page #: |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
// * G1HeapRegion #: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
// * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
// 2. G1HeapRegionSize(_region_size) is smaller than page size.
// Memory will be touched one page at a time because G1RegionToSpaceMapper commits
// pages one by one.
// * Page #: |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
// * HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
// * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
// * Page #: |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
// * G1HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
// * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
if (!is_enabled()) {
return;
@ -288,7 +288,7 @@ G1NodeIndexCheckClosure::~G1NodeIndexCheckClosure() {
FREE_C_HEAP_ARRAY(uint, _total);
}
bool G1NodeIndexCheckClosure::do_heap_region(HeapRegion* hr) {
bool G1NodeIndexCheckClosure::do_heap_region(G1HeapRegion* hr) {
// Preferred node index will only have valid node index.
uint preferred_node_index = _numa->preferred_node_index_for_index(hr->hrm_index());
// Active node index may have UnknownNodeIndex.


@ -46,7 +46,7 @@ class G1NUMA: public CHeapObj<mtGC> {
// Total number of node ids.
uint _num_active_node_ids;
// HeapRegion size
// G1HeapRegion size
size_t _region_size;
// Necessary when touching memory.
size_t _page_size;
@ -94,9 +94,9 @@ public:
// Returns node index of current calling thread.
uint index_of_current_thread() const;
// Returns the preferred index for the given HeapRegion index.
// Returns the preferred index for the given G1HeapRegion index.
// This assumes that HeapRegions are evenly split, so we can decide preferred index
// with the given HeapRegion index.
// with the given G1HeapRegion index.
// Result is less than num_active_nodes().
uint preferred_node_index_for_index(uint region_index) const;
@ -107,7 +107,7 @@ public:
// If AlwaysPreTouch is enabled, return actual node index via system call.
// If disabled, return preferred node index of the given heap region.
uint index_for_region(HeapRegion* hr) const;
uint index_for_region(G1HeapRegion* hr) const;
// Requests the given memory area to be located at the given node index.
void request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index);
@ -143,7 +143,7 @@ public:
G1NodeIndexCheckClosure(const char* desc, G1NUMA* numa, LogStream* ls);
~G1NodeIndexCheckClosure();
bool do_heap_region(HeapRegion* hr);
bool do_heap_region(G1HeapRegion* hr);
};
#endif // SHARE_VM_GC_G1_NUMA_HPP
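
The diagrams earlier in this change show how preferred NUMA nodes are assigned round-robin, either per region (when a region spans at least one page) or per page (when one page covers several regions). A sketch of that mapping as drawn, assuming the pictured 4-node layout (the real HotSpot code may differ in details):

```cpp
#include <cassert>
#include <cstdio>

// Returns the preferred node for a region, mirroring the two diagrams:
// regions striped over nodes directly when region_bytes >= page_bytes,
// otherwise striped per page so neighbours on one page share a node.
unsigned preferred_node_for_region(unsigned region_index,
                                   size_t region_bytes,
                                   size_t page_bytes,
                                   unsigned num_nodes) {
  assert(num_nodes > 0 && region_bytes > 0 && page_bytes > 0);
  if (region_bytes >= page_bytes) {
    return region_index % num_nodes;                                             // diagram 1
  }
  size_t regions_per_page = page_bytes / region_bytes;
  return static_cast<unsigned>((region_index / regions_per_page) % num_nodes);   // diagram 2
}

int main() {
  // Diagram 2 shape: 2 MB regions, 4 MB pages, 4 nodes ->
  // regions 0,1 on node 0, regions 2,3 on node 1, and so on.
  for (unsigned i = 0; i < 16; i++) {
    std::printf("region %2u -> node %u\n",
                i, preferred_node_for_region(i, 2u << 20, 4u << 20, 4));
  }
  return 0;
}
```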


@ -30,7 +30,6 @@
#include "memory/iterator.hpp"
#include "oops/markWord.hpp"
class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class G1ConcurrentMark;


@ -88,7 +88,7 @@ inline void G1ScanEvacuatedObjClosure::do_oop_work(T* p) {
const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
if (region_attr.is_in_cset()) {
prefetch_and_push(p, obj);
} else if (!HeapRegion::is_in_same_region(p, obj)) {
} else if (!G1HeapRegion::is_in_same_region(p, obj)) {
handle_non_cset_obj_common(region_attr, p, obj);
assert(_skip_card_enqueue != Uninitialized, "Scan location has not been initialized.");
if (_skip_card_enqueue == True) {
@ -135,7 +135,7 @@ inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) {
check_obj_during_refinement(p, obj);
if (HeapRegion::is_in_same_region(p, obj)) {
if (G1HeapRegion::is_in_same_region(p, obj)) {
// Normally this closure should only be called with cross-region references.
// But since Java threads are manipulating the references concurrently and we
// reload the values things may have changed.
@ -174,7 +174,7 @@ inline void G1ScanCardClosure::do_oop_work(T* p) {
// that this is a cross-region reference too.
prefetch_and_push(p, obj);
_heap_roots_found++;
} else if (!HeapRegion::is_in_same_region(p, obj)) {
} else if (!G1HeapRegion::is_in_same_region(p, obj)) {
handle_non_cset_obj_common(region_attr, p, obj);
_par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
}
@ -261,11 +261,11 @@ template <class T> void G1RebuildRemSetClosure::do_oop_work(T* p) {
return;
}
if (HeapRegion::is_in_same_region(p, obj)) {
if (G1HeapRegion::is_in_same_region(p, obj)) {
return;
}
HeapRegion* to = _g1h->heap_region_containing(obj);
G1HeapRegion* to = _g1h->heap_region_containing(obj);
HeapRegionRemSet* rem_set = to->rem_set();
if (rem_set->is_tracked()) {
rem_set->add_reference(p, _worker_id);
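
The closures above only do remembered-set and barrier work for references that cross regions, which is why they first test G1HeapRegion::is_in_same_region(p, obj). Assuming contiguous, power-of-two sized regions (true for G1; the 4 MB constant below is only an example), such a test reduces to comparing the two addresses shifted down by the region-size log:

```cpp
#include <cstdint>
#include <cstdio>

const unsigned kLogRegionBytes = 22;   // 4 MB regions, example value only

// True when the field and the object it points to lie in the same region,
// i.e. their addresses agree in all bits above the region-size log.
bool is_in_same_region(uintptr_t field_addr, uintptr_t obj_addr) {
  return (field_addr >> kLogRegionBytes) == (obj_addr >> kLogRegionBytes);
}

int main() {
  uintptr_t region0 = 0x10000000;                    // pretend heap addresses
  uintptr_t region1 = region0 + (1u << kLogRegionBytes);
  std::printf("same:  %d\n", is_in_same_region(region0 + 0x100, region0 + 0x2000)); // 1
  std::printf("cross: %d\n", is_in_same_region(region0 + 0x100, region1 + 0x40));   // 0
  return 0;
}
```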


@ -277,7 +277,7 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
}
// Skip the card enqueue iff the object (to_array) is in survivor region.
// However, HeapRegion::is_survivor() is too expensive here.
// However, G1HeapRegion::is_survivor() is too expensive here.
// Instead, we use dest_attr.is_young() because the two values are always
// equal: successfully allocated young regions must be survivor regions.
assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
@ -444,7 +444,7 @@ void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
HeapRegion* region = _g1h->heap_region_containing(obj_start);
G1HeapRegion* region = _g1h->heap_region_containing(obj_start);
region->update_bot_for_block(obj_start, obj_start + word_sz);
}
@ -469,7 +469,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
uint age = 0;
G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
HeapRegion* const from_region = _g1h->heap_region_containing(old);
G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
uint node_index = from_region->node_index();
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
@ -552,7 +552,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
}
// Skip the card enqueue iff the object (obj) is in survivor region.
// However, HeapRegion::is_survivor() is too expensive here.
// However, G1HeapRegion::is_survivor() is too expensive here.
// Instead, we use dest_attr.is_young() because the two values are always
// equal: successfully allocated young regions must be survivor regions.
assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
@ -623,7 +623,7 @@ void G1ParScanThreadStateSet::flush_stats() {
_flushed = true;
}
void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
void G1ParScanThreadStateSet::record_unused_optional_region(G1HeapRegion* hr) {
for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
G1ParScanThreadState* pss = _states[worker_index];
assert(pss != nullptr, "must be initialized");
@ -640,7 +640,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
if (forward_ptr == nullptr) {
// Forward-to-self succeeded. We are the "owner" of the object.
HeapRegion* r = _g1h->heap_region_containing(old);
G1HeapRegion* r = _g1h->heap_region_containing(old);
if (_evac_failure_regions->record(_worker_id, r->hrm_index(), cause_pinned)) {
G1HeapRegionPrinter::evac_failure(r);


@ -46,7 +46,7 @@ class G1EvacFailureRegions;
class G1EvacuationRootClosures;
class G1OopStarChunkedList;
class G1PLABAllocator;
class HeapRegion;
class G1HeapRegion;
class PreservedMarks;
class PreservedMarksSet;
class outputStream;
@ -238,7 +238,7 @@ public:
template <typename T>
inline void remember_reference_into_optional_region(T* p);
inline G1OopStarChunkedList* oops_into_optional_region(const HeapRegion* hr);
inline G1OopStarChunkedList* oops_into_optional_region(const G1HeapRegion* hr);
};
class G1ParScanThreadStateSet : public StackObj {
@ -265,7 +265,7 @@ class G1ParScanThreadStateSet : public StackObj {
PreservedMarksSet* preserved_marks_set() { return &_preserved_marks_set; }
void flush_stats();
void record_unused_optional_region(HeapRegion* hr);
void record_unused_optional_region(G1HeapRegion* hr);
G1ParScanThreadState* state_for_worker(uint worker_id);
uint num_workers() const { return _num_workers; }


@ -89,7 +89,7 @@ inline void G1ParScanThreadState::remember_reference_into_optional_region(T* p)
verify_task(p);
}
G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const HeapRegion* hr) {
G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const G1HeapRegion* hr) {
assert(hr->index_in_opt_cset() < _max_num_optional_regions,
"Trying to access optional region idx %u beyond " SIZE_FORMAT " " HR_FORMAT,
hr->index_in_opt_cset(), _max_num_optional_regions, HR_FORMAT_PARAMS(hr));
@ -109,7 +109,7 @@ template <class T> bool G1ParScanThreadState::enqueue_if_new(T* p) {
}
template <class T> void G1ParScanThreadState::enqueue_card_into_evac_fail_region(T* p, oop obj) {
assert(!HeapRegion::is_in_same_region(p, obj), "Should have filtered out cross-region references already.");
assert(!G1HeapRegion::is_in_same_region(p, obj), "Should have filtered out cross-region references already.");
assert(!_g1h->heap_region_containing(p)->is_survivor(), "Should have filtered out from-newly allocated survivor references already.");
assert(_g1h->heap_region_containing(obj)->in_collection_set(), "Only for enqueueing reference into collection set region");
@ -120,7 +120,7 @@ template <class T> void G1ParScanThreadState::enqueue_card_into_evac_fail_region
template <class T> void G1ParScanThreadState::write_ref_field_post(T* p, oop obj) {
assert(obj != nullptr, "Must be");
if (HeapRegion::is_in_same_region(p, obj)) {
if (G1HeapRegion::is_in_same_region(p, obj)) {
return;
}
G1HeapRegionAttr from_attr = _g1h->region_attr(p);
@ -144,14 +144,14 @@ template <class T> void G1ParScanThreadState::write_ref_field_post(T* p, oop obj
}
template <class T> void G1ParScanThreadState::enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
assert(!G1HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
assert(!_g1h->heap_region_containing(p)->is_survivor(), "Should have filtered out from-newly allocated survivor references already.");
// We relabel all regions that failed evacuation as old gen without remembered sets,
// and so pre-filter them out in the caller.
assert(!_g1h->heap_region_containing(o)->in_collection_set(), "Should not try to enqueue reference into collection set region");
#ifdef ASSERT
HeapRegion* const hr_obj = _g1h->heap_region_containing(o);
G1HeapRegion* const hr_obj = _g1h->heap_region_containing(o);
assert(region_attr.remset_is_tracked() == hr_obj->rem_set()->is_tracked(),
"State flag indicating remset tracking disagrees (%s) with actual remembered set (%s) for region %u",
BOOL_TO_STR(region_attr.remset_is_tracked()),


@ -142,7 +142,7 @@ class G1YoungLengthPredictor {
return false;
}
const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
const size_t free_bytes = (_base_free_regions - young_length) * G1HeapRegion::GrainBytes;
// When copying, we will likely need more bytes free than is live in the region.
// Add some safety margin to factor in the confidence of our guess, and the
@ -173,7 +173,7 @@ void G1Policy::record_new_heap_size(uint new_number_of_regions) {
_young_gen_sizer.heap_size_changed(new_number_of_regions);
_ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
_ihop_control->update_target_occupancy(new_number_of_regions * G1HeapRegion::GrainBytes);
}
uint G1Policy::calculate_desired_eden_length_by_mmu() const {
@ -507,9 +507,9 @@ uint G1Policy::calculate_desired_eden_length_before_mixed(double base_time_ms,
}
double G1Policy::predict_survivor_regions_evac_time() const {
const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
const GrowableArray<G1HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
double survivor_regions_evac_time = predict_young_region_other_time_ms(_g1h->survivor()->length());
for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
for (GrowableArrayIterator<G1HeapRegion*> it = survivor_regions->begin();
it != survivor_regions->end();
++it) {
survivor_regions_evac_time += predict_region_copy_time_ms(*it, _g1h->collector_state()->in_young_only_phase());
@ -529,7 +529,7 @@ double G1Policy::predict_retained_regions_evac_time() const {
list.length());
for (G1CollectionSetCandidateInfo* ci : list) {
HeapRegion* r = ci->_r;
G1HeapRegion* r = ci->_r;
// We optimistically assume that any of these marking candidate regions will
// be reclaimable the next gc, so just consider them as normal.
if (r->has_pinned_objects()) {
@ -599,7 +599,7 @@ void G1Policy::record_full_collection_end() {
_survivor_surv_rate_group->reset();
update_young_length_bounds();
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes);
record_pause(G1GCPauseType::FullGC, _full_collection_start_sec, end_sec);
}
@ -663,12 +663,12 @@ bool G1Policy::should_retain_evac_failed_region(uint index) const {
size_t live_bytes = _g1h->region_at(index)->live_bytes();
#ifdef ASSERT
HeapRegion* r = _g1h->region_at(index);
G1HeapRegion* r = _g1h->region_at(index);
assert(live_bytes != 0,
"live bytes not set for %u used %zu garbage %zu cm-live %zu pinned %d",
index, r->used(), r->garbage_bytes(), live_bytes, r->has_pinned_objects());
#endif
size_t threshold = G1RetainRegionLiveThresholdPercent * HeapRegion::GrainBytes / 100;
size_t threshold = G1RetainRegionLiveThresholdPercent * G1HeapRegion::GrainBytes / 100;
return live_bytes < threshold;
}
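The retention test above is plain percentage arithmetic over the fixed region size: an evacuation-failed region stays a retained candidate only while its live data is below a fraction of one region. A small sketch with made-up numbers (neither the region size nor the threshold value is taken from G1 defaults):

  #include <cstddef>
  #include <cstdio>

  int main() {
    const std::size_t grain_bytes       = 4u * 1024 * 1024;                   // hypothetical region size: 4 MB
    const std::size_t threshold_percent = 25;                                 // hypothetical live threshold percent
    const std::size_t threshold         = threshold_percent * grain_bytes / 100; // 1 MB
    const std::size_t live_bytes        = 512 * 1024;                         // example: 512 KB still live
    // The region is kept for another evacuation attempt only while it is mostly empty.
    std::printf("retain = %s\n", live_bytes < threshold ? "yes" : "no");      // prints "retain = yes"
    return 0;
  }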
@ -954,7 +954,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
update_young_length_bounds();
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes);
update_ihop_prediction(app_time_ms / 1000.0,
G1GCPauseTypeHelper::is_young_only_pause(this_pause));
@ -1040,7 +1040,7 @@ void G1Policy::update_ihop_prediction(double mutator_time_s,
// restrained by the heap reserve. Using the actual length would make the
// prediction too small and then limit the young gen every time we get to the
// predicted target occupancy.
size_t young_gen_size = young_list_desired_length() * HeapRegion::GrainBytes;
size_t young_gen_size = young_list_desired_length() * G1HeapRegion::GrainBytes;
_ihop_control->update_allocation_info(mutator_time_s, young_gen_size);
report = true;
}
@ -1093,7 +1093,7 @@ double G1Policy::predict_base_time_ms(size_t pending_cards) const {
return predict_base_time_ms(pending_cards, card_rs_length, code_root_rs_length);
}
size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
size_t G1Policy::predict_bytes_to_copy(G1HeapRegion* hr) const {
size_t bytes_to_copy;
if (!hr->is_young()) {
bytes_to_copy = hr->live_bytes();
@ -1111,19 +1111,19 @@ double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) co
if (count == 0) {
return 0.0;
}
size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count - 1) * HeapRegion::GrainBytes;
size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count - 1) * G1HeapRegion::GrainBytes;
if (bytes_to_copy != nullptr) {
*bytes_to_copy = expected_bytes;
}
return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->in_young_only_phase());
}
double G1Policy::predict_region_copy_time_ms(HeapRegion* hr, bool for_young_only_phase) const {
double G1Policy::predict_region_copy_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const {
size_t const bytes_to_copy = predict_bytes_to_copy(hr);
return _analytics->predict_object_copy_time_ms(bytes_to_copy, for_young_only_phase);
}
double G1Policy::predict_region_merge_scan_time(HeapRegion* hr, bool for_young_only_phase) const {
double G1Policy::predict_region_merge_scan_time(G1HeapRegion* hr, bool for_young_only_phase) const {
size_t card_rs_length = hr->rem_set()->occupied();
size_t scan_card_num = _analytics->predict_scan_card_num(card_rs_length, for_young_only_phase);
@ -1132,14 +1132,14 @@ double G1Policy::predict_region_merge_scan_time(HeapRegion* hr, bool for_young_o
_analytics->predict_card_scan_time_ms(scan_card_num, for_young_only_phase);
}
double G1Policy::predict_region_code_root_scan_time(HeapRegion* hr, bool for_young_only_phase) const {
double G1Policy::predict_region_code_root_scan_time(G1HeapRegion* hr, bool for_young_only_phase) const {
size_t code_root_length = hr->rem_set()->code_roots_list_length();
return
_analytics->predict_code_root_scan_time_ms(code_root_length, for_young_only_phase);
}
double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr,
double G1Policy::predict_region_non_copy_time_ms(G1HeapRegion* hr,
bool for_young_only_phase) const {
double region_elapsed_time_ms = predict_region_merge_scan_time(hr, for_young_only_phase) +
@ -1154,7 +1154,7 @@ double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr,
return region_elapsed_time_ms;
}
double G1Policy::predict_region_total_time_ms(HeapRegion* hr, bool for_young_only_phase) const {
double G1Policy::predict_region_total_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const {
return
predict_region_non_copy_time_ms(hr, for_young_only_phase) +
predict_region_copy_time_ms(hr, for_young_only_phase);
@ -1175,12 +1175,12 @@ size_t G1Policy::estimate_used_young_bytes_locked() const {
uint used = _g1h->young_regions_count();
uint alloc = allocator->num_nodes();
uint full = used - MIN2(used, alloc);
size_t bytes_used = full * HeapRegion::GrainBytes;
size_t bytes_used = full * G1HeapRegion::GrainBytes;
return bytes_used + allocator->used_in_alloc_regions();
}
size_t G1Policy::desired_survivor_size(uint max_regions) const {
size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
size_t const survivor_capacity = G1HeapRegion::GrainWords * max_regions;
return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}
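desired_survivor_size applies the TargetSurvivorRatio percentage to a capacity expressed in whole regions. A compact sketch with the region size and ratio chosen purely for illustration:

  #include <cstddef>

  // Illustrative constants only: 2 MB regions (262144 words with 8-byte words)
  // and a 50 percent target survivor ratio.
  static const std::size_t kGrainWords          = 2u * 1024 * 1024 / 8;
  static const double      kTargetSurvivorRatio = 50.0;

  // Same shape as the computation above: capacity in words scaled by the ratio.
  static std::size_t desired_survivor_size(unsigned max_regions) {
    std::size_t survivor_capacity = kGrainWords * max_regions;
    return (std::size_t)(((double)survivor_capacity * kTargetSurvivorRatio) / 100.0);
  }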
@ -1337,7 +1337,7 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se
void G1Policy::abandon_collection_set_candidates() {
// Clear remembered sets of remaining candidate regions and the actual candidate
// set.
for (HeapRegion* r : *candidates()) {
for (G1HeapRegion* r : *candidates()) {
r->rem_set()->clear(true /* only_cardset */);
}
_collection_set->abandon_all_candidates();
@ -1502,7 +1502,7 @@ double G1Policy::select_candidates_from_marking(G1CollectionCandidateList* marki
print_finish_message("Maximum number of regions reached", true);
break;
}
HeapRegion* hr = (*iter)->_r;
G1HeapRegion* hr = (*iter)->_r;
// Skip evacuating pinned marking regions because we are not getting any free
// space from them (and we expect to get free space from marking candidates).
// Also prepare to move them to retained regions to be evacuated optionally later
@ -1597,7 +1597,7 @@ void G1Policy::select_candidates_from_retained(G1CollectionCandidateList* retain
min_regions, retained_list->length(), time_remaining_ms, optional_time_remaining_ms);
for (G1CollectionSetCandidateInfo* ci : *retained_list) {
HeapRegion* r = ci->_r;
G1HeapRegion* r = ci->_r;
double predicted_time_ms = predict_region_total_time_ms(r, collector_state()->in_young_only_phase());
bool fits_in_remaining_time = predicted_time_ms <= time_remaining_ms;
// If we can't reclaim that region ignore it for now.
@ -1655,7 +1655,7 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionCandidateRe
double total_prediction_ms = 0.0;
for (HeapRegion* r : *optional_regions) {
for (G1HeapRegion* r : *optional_regions) {
double prediction_ms = predict_region_total_time_ms(r, false);
if (prediction_ms > time_remaining_ms) {
@ -1678,10 +1678,10 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionCandidateRe
void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
start_adding_survivor_regions();
for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
for (GrowableArrayIterator<G1HeapRegion*> it = survivors->regions()->begin();
it != survivors->regions()->end();
++it) {
HeapRegion* curr = *it;
G1HeapRegion* curr = *it;
set_region_survivor(curr);
// The region is a non-empty survivor so let's add it to


@ -44,7 +44,7 @@
// * choice of collection set.
// * when to collect.
class HeapRegion;
class G1HeapRegion;
class G1CollectionSet;
class G1CollectionCandidateList;
class G1CollectionSetCandidates;
@ -122,12 +122,12 @@ public:
G1OldGenAllocationTracker* old_gen_alloc_tracker() { return &_old_gen_alloc_tracker; }
void set_region_eden(HeapRegion* hr) {
void set_region_eden(G1HeapRegion* hr) {
hr->set_eden();
hr->install_surv_rate_group(_eden_surv_rate_group);
}
void set_region_survivor(HeapRegion* hr) {
void set_region_survivor(G1HeapRegion* hr) {
assert(hr->is_survivor(), "pre-condition");
hr->install_surv_rate_group(_survivor_surv_rate_group);
}
@ -145,14 +145,14 @@ private:
double predict_base_time_ms(size_t pending_cards, size_t card_rs_length, size_t code_root_length) const;
// Copy time for a region is copying live data.
double predict_region_copy_time_ms(HeapRegion* hr, bool for_young_only_phase) const;
double predict_region_copy_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const;
// Merge-scan time for a region is handling card-based remembered sets of that region
// (as a single unit).
double predict_region_merge_scan_time(HeapRegion* hr, bool for_young_only_phase) const;
double predict_region_merge_scan_time(G1HeapRegion* hr, bool for_young_only_phase) const;
// Code root scan time prediction for the given region.
double predict_region_code_root_scan_time(HeapRegion* hr, bool for_young_only_phase) const;
double predict_region_code_root_scan_time(G1HeapRegion* hr, bool for_young_only_phase) const;
// Non-copy time for a region is handling remembered sets and other time.
double predict_region_non_copy_time_ms(HeapRegion* hr, bool for_young_only_phase) const;
double predict_region_non_copy_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const;
public:
@ -163,7 +163,7 @@ public:
double predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy = nullptr) const;
// Total time for a region is handling remembered sets (as a single unit), copying its live data
// and other time.
double predict_region_total_time_ms(HeapRegion* hr, bool for_young_only_phase) const;
double predict_region_total_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const;
void cset_regions_freed() {
bool update = should_update_surv_rate_group_predictors();
@ -245,7 +245,7 @@ private:
// Limit the given desired young length to available free regions.
uint calculate_young_target_length(uint desired_young_length) const;
size_t predict_bytes_to_copy(HeapRegion* hr) const;
size_t predict_bytes_to_copy(G1HeapRegion* hr) const;
double predict_survivor_regions_evac_time() const;
double predict_retained_regions_evac_time() const;
@ -400,7 +400,7 @@ public:
void record_concurrent_refinement_stats(size_t pending_cards,
size_t thread_buffer_cards);
bool should_retain_evac_failed_region(HeapRegion* r) const {
bool should_retain_evac_failed_region(G1HeapRegion* r) const {
return should_retain_evac_failed_region(r->hrm_index());
}
bool should_retain_evac_failed_region(uint index) const;


@ -36,7 +36,7 @@ G1RegionsOnNodes::~G1RegionsOnNodes() {
FREE_C_HEAP_ARRAY(uint, _count_per_node);
}
uint G1RegionsOnNodes::add(HeapRegion* hr) {
uint G1RegionsOnNodes::add(G1HeapRegion* hr) {
uint node_index = hr->node_index();
// Update only if the node index is valid.


@ -28,7 +28,7 @@
#include "memory/allocation.hpp"
class G1NUMA;
class HeapRegion;
class G1HeapRegion;
// Contains per node index region count
class G1RegionsOnNodes : public StackObj {
@ -41,7 +41,7 @@ public:
~G1RegionsOnNodes();
// Increase _count_per_node for the node of given heap region and returns node index.
uint add(HeapRegion* hr);
uint add(G1HeapRegion* hr);
void clear();


@ -92,7 +92,7 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
size_t _max_reserved_regions;
// Card table iteration claim for each heap region, from 0 (completely unscanned)
// to (>=) HeapRegion::CardsPerRegion (completely scanned).
// to (>=) G1HeapRegion::CardsPerRegion (completely scanned).
uint volatile* _card_table_scan_state;
uint _scan_chunks_per_region; // Number of chunks per region.
@ -223,7 +223,7 @@ private:
return AlmostNoWork;
}
double num_cards = num_regions << HeapRegion::LogCardsPerRegion;
double num_cards = num_regions << G1HeapRegion::LogCardsPerRegion;
return ceil(num_cards / num_cards_per_worker);
}
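The worker estimate above turns a region count into a card count through the per-region card shift and divides it by a per-worker card budget. A worked sketch under assumed sizes:

  #include <cmath>
  #include <cstddef>

  // Assumed geometry: 2 MB regions with 512-byte cards -> 4096 cards per region (log2 = 12).
  static const unsigned kLogCardsPerRegion = 12;

  static unsigned workers_for_clearing(std::size_t num_regions, std::size_t cards_per_worker) {
    double num_cards = (double)(num_regions << kLogCardsPerRegion);
    return (unsigned)std::ceil(num_cards / (double)cards_per_worker);
  }
  // Example: 100 dirty regions and a 65536-card budget -> ceil(409600 / 65536) = 7 workers.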
@ -235,14 +235,14 @@ private:
}
void do_work(uint worker_id) override {
const uint num_regions_per_worker = num_cards_per_worker / (uint)HeapRegion::CardsPerRegion;
const uint num_regions_per_worker = num_cards_per_worker / (uint)G1HeapRegion::CardsPerRegion;
while (_cur_dirty_regions < _regions->size()) {
uint next = Atomic::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker);
uint max = MIN2(next + num_regions_per_worker, _regions->size());
for (uint i = next; i < max; i++) {
HeapRegion* r = _g1h->region_at(_regions->at(i));
G1HeapRegion* r = _g1h->region_at(_regions->at(i));
r->clear_cardtable();
}
}
@ -276,7 +276,7 @@ public:
_num_total_scan_chunks = max_reserved_regions * _scan_chunks_per_region;
_region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);
_scan_chunks_shift = (uint8_t)log2i(HeapRegion::CardsPerRegion / _scan_chunks_per_region);
_scan_chunks_shift = (uint8_t)log2i(G1HeapRegion::CardsPerRegion / _scan_chunks_per_region);
_scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_reserved_regions, mtGC);
}
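_scan_chunks_shift records how many low bits of a card index select a position inside one scan chunk, so mapping a card to its chunk is a single shift. A sketch under assumed chunk geometry:

  // Assumed geometry: 4096 cards per region split into 8 scan chunks,
  // so each chunk covers 512 cards and the shift is log2(4096 / 8) = 9.
  static const unsigned kChunkShift = 9;

  // A card's chunk inside its region is the card index divided by the chunk size.
  static unsigned chunk_for_card(unsigned card_idx_in_region) {
    return card_idx_in_region >> kChunkShift;
  }
  // Example: card 1500 -> chunk 2 (which covers cards 1024..1535).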
@ -317,7 +317,7 @@ public:
// - are located in free regions
// as we do not clean up remembered sets before merging heap roots.
bool contains_cards_to_process(uint const region_idx) const {
HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
G1HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
return (hr != nullptr && !hr->in_collection_set() && hr->is_old_or_humongous());
}
@ -328,11 +328,11 @@ public:
result++;
}
}
return result * (HeapRegion::CardsPerRegion / _scan_chunks_per_region);
return result * (G1HeapRegion::CardsPerRegion / _scan_chunks_per_region);
}
size_t num_cards_in_dirty_regions() const {
return _next_dirty_regions->size() * HeapRegion::CardsPerRegion;
return _next_dirty_regions->size() * G1HeapRegion::CardsPerRegion;
}
void set_chunk_range_dirty(size_t const region_card_idx, size_t const card_length) {
@ -392,7 +392,7 @@ public:
bool has_cards_to_scan(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
return _card_table_scan_state[region] < G1HeapRegion::CardsPerRegion;
}
uint claim_cards_to_scan(uint region, uint increment) {
@ -402,7 +402,7 @@ public:
void add_dirty_region(uint const region) {
#ifdef ASSERT
HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
G1HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
assert(!hr->in_collection_set() && hr->is_old_or_humongous(),
"Region %u is not suitable for scanning, is %sin collection set or %s",
hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
@ -412,7 +412,7 @@ public:
void add_all_dirty_region(uint region) {
#ifdef ASSERT
HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
G1HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
assert(hr->in_collection_set(),
"Only add collection set regions to all dirty regions directly but %u is %s",
hr->hrm_index(), hr->get_short_type_str());
@ -461,13 +461,13 @@ public:
_scan_state(scan_state),
_region_idx(region_idx),
_cur_claim(0) {
guarantee(size() <= HeapRegion::CardsPerRegion, "Should not claim more space than possible.");
guarantee(size() <= G1HeapRegion::CardsPerRegion, "Should not claim more space than possible.");
}
bool has_next() {
while (true) {
_cur_claim = _scan_state->claim_cards_to_scan(_region_idx, size());
if (_cur_claim >= HeapRegion::CardsPerRegion) {
if (_cur_claim >= G1HeapRegion::CardsPerRegion) {
return false;
}
if (_scan_state->chunk_needs_scan(_region_idx, _cur_claim)) {
@ -509,7 +509,7 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure {
CardValue _scanned_card_value;
HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) {
HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
G1HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
G1ScanCardClosure card_cl(_g1h, _pss, _heap_roots_found);
HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl);
@ -639,7 +639,7 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure {
}
};
void scan_heap_roots(HeapRegion* r) {
void scan_heap_roots(G1HeapRegion* r) {
uint const region_idx = r->hrm_index();
ResourceMark rm;
@ -655,7 +655,7 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure {
while (claim.has_next()) {
_chunks_claimed++;
size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value();
size_t const region_card_base_idx = ((size_t)region_idx << G1HeapRegion::LogCardsPerRegion) + claim.value();
CardValue* const start_card = _ct->byte_for_index(region_card_base_idx);
CardValue* const end_card = start_card + claim.size();
@ -690,7 +690,7 @@ public:
: G1CardTable::clean_card_val()) {
}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
assert(!r->in_collection_set() && r->is_old_or_humongous(),
"Should only be called on old gen non-collection set regions but region %u is not.",
r->hrm_index());
@ -776,7 +776,7 @@ class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
Tickspan _rem_set_opt_root_scan_time;
Tickspan _rem_set_opt_trim_partially_time;
void scan_opt_rem_set_roots(HeapRegion* r) {
void scan_opt_rem_set_roots(G1HeapRegion* r) {
G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);
G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss, _opt_roots_scanned);
@ -805,7 +805,7 @@ public:
_rem_set_opt_root_scan_time(),
_rem_set_opt_trim_partially_time() { }
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
// The individual references for the optional remembered set are per-worker, so we
// always need to scan them.
if (r->has_index_in_opt_cset()) {
@ -880,7 +880,7 @@ void G1RemSet::assert_scan_top_is_null(uint hrm_index) {
}
#endif
void G1RemSet::prepare_region_for_scan(HeapRegion* r) {
void G1RemSet::prepare_region_for_scan(G1HeapRegion* r) {
uint hrm_index = r->hrm_index();
r->prepare_remset_for_scan();
@ -1044,7 +1044,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
bool start_iterate(uint const tag, uint const region_idx) {
assert(tag < G1GCPhaseTimes::MergeRSCards, "invalid tag %u", tag);
if (remember_if_interesting(region_idx)) {
_region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
_region_base_idx = (size_t)region_idx << G1HeapRegion::LogCardsPerRegion;
_stats.inc_card_set_merged(tag);
return true;
}
@ -1071,7 +1071,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
// pass) or the "dirty" list will be merged with the "all" list later otherwise.
// (And there is no problem either way if the region does not contain dirty
// cards).
void merge_card_set_for_region(HeapRegion* r) {
void merge_card_set_for_region(G1HeapRegion* r) {
assert(r->in_collection_set() || r->is_starts_humongous(), "must be");
HeapRegionRemSet* rem_set = r->rem_set();
@ -1080,7 +1080,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
}
}
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
assert(r->in_collection_set(), "must be");
_scan_state->add_all_dirty_region(r->hrm_index());
@ -1101,12 +1101,12 @@ class G1MergeHeapRootsTask : public WorkerTask {
class G1ClearBitmapClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
void assert_bitmap_clear(HeapRegion* hr, const G1CMBitMap* bitmap) {
void assert_bitmap_clear(G1HeapRegion* hr, const G1CMBitMap* bitmap) {
assert(bitmap->get_next_marked_addr(hr->bottom(), hr->end()) == hr->end(),
"Bitmap should have no mark for region %u (%s)", hr->hrm_index(), hr->get_short_type_str());
}
bool should_clear_region(HeapRegion* hr) const {
bool should_clear_region(G1HeapRegion* hr) const {
// The bitmap for young regions must obviously be clear as we never mark through them;
// old regions that are currently being marked through are only in the collection set
// after the concurrent cycle completed, so their bitmaps must also be clear except when
@ -1126,7 +1126,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
public:
G1ClearBitmapClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
assert(_g1h->is_in_cset(hr), "Should only be used iterating the collection set");
// Evacuation failure uses the bitmap to record evacuation failed objects,
@ -1152,7 +1152,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
_closure1(cl1),
_closure2(cl2) { }
bool do_heap_region(HeapRegion* hr) {
bool do_heap_region(G1HeapRegion* hr) {
return _closure1->do_heap_region(hr) ||
_closure2->do_heap_region(hr);
}
@ -1173,7 +1173,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
return false;
}
HeapRegion* r = g1h->region_at(region_index);
G1HeapRegion* r = g1h->region_at(region_index);
assert(r->rem_set()->is_complete(), "humongous candidates must have complete remset");
@ -1413,7 +1413,7 @@ void G1RemSet::print_merge_heap_roots_stats() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
size_t total_old_region_cards =
(g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * HeapRegion::CardsPerRegion;
(g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * G1HeapRegion::CardsPerRegion;
ls.print_cr("Visited cards " SIZE_FORMAT " Total dirty " SIZE_FORMAT " (%.2lf%%) Total old " SIZE_FORMAT " (%.2lf%%)",
num_visited_cards,
@ -1496,7 +1496,7 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
// Find the start address represented by the card.
HeapWord* start = _ct->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1h->heap_region_containing_or_null(start);
G1HeapRegion* r = _g1h->heap_region_containing_or_null(start);
// If this is a (stale) card into an uncommitted region, exit.
if (r == nullptr) {
@ -1570,7 +1570,7 @@ void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
// Construct the MemRegion representing the card.
HeapWord* start = _ct->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1h->heap_region_containing(start);
G1HeapRegion* r = _g1h->heap_region_containing(start);
// This reload of the top is safe even though it happens after the full
// fence, because top is stable for old and unfiltered humongous
// regions, so it must return the same value as the previous load when


@ -108,7 +108,7 @@ public:
void exclude_region_from_scan(uint region_idx);
// Creates a snapshot of the current _top values at the start of collection to
// filter out card marks that we do not want to scan.
void prepare_region_for_scan(HeapRegion* region);
void prepare_region_for_scan(G1HeapRegion* region);
// Do work for regions in the current increment of the collection set, scanning
// non-card based (heap) roots.


@ -190,23 +190,23 @@ private:
RegionTypeCounter _all;
size_t _max_rs_mem_sz;
HeapRegion* _max_rs_mem_sz_region;
G1HeapRegion* _max_rs_mem_sz_region;
size_t total_rs_unused_mem_sz() const { return _all.rs_unused_mem_size(); }
size_t total_rs_mem_sz() const { return _all.rs_mem_size(); }
size_t total_cards_occupied() const { return _all.cards_occupied(); }
size_t max_rs_mem_sz() const { return _max_rs_mem_sz; }
HeapRegion* max_rs_mem_sz_region() const { return _max_rs_mem_sz_region; }
G1HeapRegion* max_rs_mem_sz_region() const { return _max_rs_mem_sz_region; }
size_t _max_code_root_mem_sz;
HeapRegion* _max_code_root_mem_sz_region;
G1HeapRegion* _max_code_root_mem_sz_region;
size_t total_code_root_mem_sz() const { return _all.code_root_mem_size(); }
size_t total_code_root_elems() const { return _all.code_root_elems(); }
size_t max_code_root_mem_sz() const { return _max_code_root_mem_sz; }
HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
G1HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
public:
HRRSStatsIter() : _young("Young"), _humongous("Humongous"),
@ -215,7 +215,7 @@ public:
_max_code_root_mem_sz(0), _max_code_root_mem_sz_region(nullptr)
{}
bool do_heap_region(HeapRegion* r) {
bool do_heap_region(G1HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();
// HeapRegionRemSet::mem_size() includes the


@ -30,7 +30,7 @@
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "runtime/safepoint.hpp"
bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(G1HeapRegion* r) const {
// All non-free and non-young regions need to be scanned for references;
// At every gc we gather references to other regions in young.
// Free regions trivially do not need scanning because they do not contain live
@ -38,7 +38,7 @@ bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
return !(r->is_young() || r->is_free());
}
void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
void G1RemSetTrackingPolicy::update_at_allocate(G1HeapRegion* r) {
assert(r->is_young() || r->is_humongous() || r->is_old(),
"Region %u with unexpected heap region type %s", r->hrm_index(), r->get_type_str());
if (r->is_old()) {
@ -51,11 +51,11 @@ void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
r->rem_set()->set_state_complete();
}
void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
void G1RemSetTrackingPolicy::update_at_free(G1HeapRegion* r) {
/* nothing to do */
}
bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r) {
bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(G1HeapRegion* r) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(r->is_starts_humongous(), "Region %u should be Humongous", r->hrm_index());
@ -66,7 +66,7 @@ bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r) {
// support eager-reclaim. However, their remset state can be reset after
// Full-GC. Try to re-enable remset-tracking for them if possible.
if (cast_to_oop(r->bottom())->is_typeArray() && !r->rem_set()->is_tracked()) {
auto on_humongous_region = [] (HeapRegion* r) {
auto on_humongous_region = [] (G1HeapRegion* r) {
r->rem_set()->set_state_updating();
};
G1CollectedHeap::heap()->humongous_obj_regions_iterate(r, on_humongous_region);
@ -76,7 +76,7 @@ bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r) {
return selected_for_rebuild;
}
bool G1RemSetTrackingPolicy::update_old_before_rebuild(HeapRegion* r) {
bool G1RemSetTrackingPolicy::update_old_before_rebuild(G1HeapRegion* r) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(r->is_old(), "Region %u should be Old", r->hrm_index());
@ -93,7 +93,7 @@ bool G1RemSetTrackingPolicy::update_old_before_rebuild(HeapRegion* r) {
return selected_for_rebuild;
}
void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
void G1RemSetTrackingPolicy::update_after_rebuild(G1HeapRegion* r) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
if (r->is_old_or_humongous()) {
@ -107,7 +107,7 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
if (r->is_starts_humongous() && !g1h->is_potential_eager_reclaim_candidate(r)) {
// Handle HC regions with the HS region.
g1h->humongous_obj_regions_iterate(r,
[&] (HeapRegion* r) {
[&] (G1HeapRegion* r) {
assert(!r->is_continues_humongous() || r->rem_set()->is_empty(),
"Continues humongous region %u remset should be empty", r->hrm_index());
r->rem_set()->clear(true /* only_cardset */);


@ -36,22 +36,22 @@ class G1RemSetTrackingPolicy : public CHeapObj<mtGC> {
public:
// Do we need to scan the given region to get all outgoing references for remembered
// set rebuild?
bool needs_scan_for_rebuild(HeapRegion* r) const;
bool needs_scan_for_rebuild(G1HeapRegion* r) const;
// Update remembered set tracking state at allocation of the region. May be
// called at any time. The caller makes sure that the changes to the remembered
// set state are visible to other threads.
void update_at_allocate(HeapRegion* r);
void update_at_allocate(G1HeapRegion* r);
// Update remembered set tracking state for humongous regions before we are going to
// rebuild remembered sets. Called at safepoint in the remark pause.
bool update_humongous_before_rebuild(HeapRegion* r);
bool update_humongous_before_rebuild(G1HeapRegion* r);
// Update remembered set tracking state for old regions before we are going
// to rebuild remembered sets. Called at safepoint in the remark pause.
bool update_old_before_rebuild(HeapRegion* r);
bool update_old_before_rebuild(G1HeapRegion* r);
// Update remembered set tracking state after rebuild is complete, i.e. the cleanup
// pause. Called at safepoint.
void update_after_rebuild(HeapRegion* r);
void update_after_rebuild(G1HeapRegion* r);
// Update remembered set tracking state when the region is freed.
void update_at_free(HeapRegion* r);
void update_at_free(G1HeapRegion* r);
};
#endif // SHARE_GC_G1_G1REMSETTRACKINGPOLICY_HPP


@ -85,7 +85,7 @@ void G1SurvRateGroup::stop_adding_regions() {
void G1SurvRateGroup::record_surviving_words(uint age, size_t surv_words) {
assert(is_valid_age(age), "age is %u not between 0 and %u", age, _num_added_regions);
double surv_rate = (double)surv_words / HeapRegion::GrainWords;
double surv_rate = (double)surv_words / G1HeapRegion::GrainWords;
_surv_rate_predictors[age]->add(surv_rate);
}
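record_surviving_words converts the absolute number of surviving words for a given age into a per-region survival fraction before feeding the predictor. A toy example, assuming one particular region size:

  #include <cstddef>
  #include <cstdio>

  int main() {
    const std::size_t grain_words = 262144; // assumed 2 MB region with 8-byte words
    const std::size_t surv_words  = 65536;  // words that survived at this age
    double surv_rate = (double)surv_words / (double)grain_words;
    std::printf("survival rate = %.2f\n", surv_rate); // 0.25, added to the age-indexed predictor
    return 0;
  }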


@ -29,11 +29,11 @@
#include "utilities/debug.hpp"
G1SurvivorRegions::G1SurvivorRegions() :
_regions(new (mtGC) GrowableArray<HeapRegion*>(8, mtGC)),
_regions(new (mtGC) GrowableArray<G1HeapRegion*>(8, mtGC)),
_used_bytes(0),
_regions_on_node() {}
uint G1SurvivorRegions::add(HeapRegion* hr) {
uint G1SurvivorRegions::add(G1HeapRegion* hr) {
assert(hr->is_survivor(), "should be flagged as survivor region");
_regions->append(hr);
return _regions_on_node.add(hr);
@ -48,10 +48,10 @@ uint G1SurvivorRegions::regions_on_node(uint node_index) const {
}
void G1SurvivorRegions::convert_to_eden() {
for (GrowableArrayIterator<HeapRegion*> it = _regions->begin();
for (GrowableArrayIterator<G1HeapRegion*> it = _regions->begin();
it != _regions->end();
++it) {
HeapRegion* hr = *it;
G1HeapRegion* hr = *it;
hr->set_eden_pre_gc();
}
clear();


@ -30,18 +30,18 @@
template <typename T>
class GrowableArray;
class HeapRegion;
class G1HeapRegion;
class G1SurvivorRegions {
private:
GrowableArray<HeapRegion*>* _regions;
GrowableArray<G1HeapRegion*>* _regions;
volatile size_t _used_bytes;
G1RegionsOnNodes _regions_on_node;
public:
G1SurvivorRegions();
uint add(HeapRegion* hr);
uint add(G1HeapRegion* hr);
void convert_to_eden();
@ -50,7 +50,7 @@ public:
uint length() const;
uint regions_on_node(uint node_index) const;
const GrowableArray<HeapRegion*>* regions() const {
const GrowableArray<G1HeapRegion*>* regions() const {
return _regions;
}


@ -83,16 +83,16 @@ void G1UncommitRegionTask::report_execution(Tickspan time, uint regions) {
_summary_duration += time;
log_trace(gc, heap)("Concurrent Uncommit: " SIZE_FORMAT "%s, %u regions, %1.3fms",
byte_size_in_proper_unit(regions * HeapRegion::GrainBytes),
proper_unit_for_byte_size(regions * HeapRegion::GrainBytes),
byte_size_in_proper_unit(regions * G1HeapRegion::GrainBytes),
proper_unit_for_byte_size(regions * G1HeapRegion::GrainBytes),
regions,
time.seconds() * 1000);
}
void G1UncommitRegionTask::report_summary() {
log_debug(gc, heap)("Concurrent Uncommit Summary: " SIZE_FORMAT "%s, %u regions, %1.3fms",
byte_size_in_proper_unit(_summary_region_count * HeapRegion::GrainBytes),
proper_unit_for_byte_size(_summary_region_count * HeapRegion::GrainBytes),
byte_size_in_proper_unit(_summary_region_count * G1HeapRegion::GrainBytes),
proper_unit_for_byte_size(_summary_region_count * G1HeapRegion::GrainBytes),
_summary_region_count,
_summary_duration.seconds() * 1000);
}


@ -261,7 +261,7 @@ void G1YoungCollector::wait_for_root_region_scanning() {
class G1PrintCollectionSetClosure : public HeapRegionClosure {
public:
virtual bool do_heap_region(HeapRegion* r) {
virtual bool do_heap_region(G1HeapRegion* r) {
G1HeapRegionPrinter::cset(r);
return false;
}
@ -294,7 +294,7 @@ class G1PrepareEvacuationTask : public WorkerTask {
G1MonotonicArenaMemoryStats _card_set_stats;
void sample_card_set_size(HeapRegion* hr) {
void sample_card_set_size(G1HeapRegion* hr) {
// Sample card set sizes for young gen and humongous before GC: this makes
// the policy to give back memory to the OS keep the most recent amount of
// memory for these regions.
@ -303,7 +303,7 @@ class G1PrepareEvacuationTask : public WorkerTask {
}
}
bool humongous_region_is_candidate(HeapRegion* region) const {
bool humongous_region_is_candidate(G1HeapRegion* region) const {
assert(region->is_starts_humongous(), "Must start a humongous object");
oop obj = cast_to_oop(region->bottom());
@ -375,7 +375,7 @@ class G1PrepareEvacuationTask : public WorkerTask {
_parent_task->add_humongous_total(_worker_humongous_total);
}
virtual bool do_heap_region(HeapRegion* hr) {
virtual bool do_heap_region(G1HeapRegion* hr) {
// First prepare the region for scanning
_g1h->rem_set()->prepare_region_for_scan(hr);
@ -968,7 +968,7 @@ void G1YoungCollector::enqueue_candidates_as_root_regions() {
assert(collector_state()->in_concurrent_start_gc(), "must be");
G1CollectionSetCandidates* candidates = collection_set()->candidates();
for (HeapRegion* r : *candidates) {
for (G1HeapRegion* r : *candidates) {
_g1h->concurrent_mark()->add_root_region(r);
}
}


@ -39,7 +39,7 @@ public:
_allocation_failure_regions(allocation_failure_regions),
_allocation_failure_regions_num(cset_length * G1GCAllocationFailureALotCSetPercent / 100) { }
bool do_heap_region(HeapRegion* r) override {
bool do_heap_region(G1HeapRegion* r) override {
assert(r->in_collection_set(), "must be");
if (_allocation_failure_regions_num > 0) {
_allocation_failure_regions.set_bit(r->hrm_index());

Some files were not shown because too many files changed in this diff.