This article collects typical usage examples of the C++ method ParallelScavengeHeap::total_collections. If you have been wondering exactly what ParallelScavengeHeap::total_collections does and how it is used in practice, the curated examples below should help. You can also browse further usage examples of its containing class, ParallelScavengeHeap. All of the examples are drawn from the Parallel Scavenge collector in the HotSpot JVM.
A total of 9 code examples of ParallelScavengeHeap::total_collections are shown below, sorted by popularity by default.
Example 1: resize
void PSOldGen::resize(size_t desired_free_space) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
const size_t alignment = heap->min_alignment();
const size_t size_before = _virtual_space.committed_size();
size_t new_size = used_in_bytes() + desired_free_space;
new_size = align_size_up(new_size, alignment);
assert(_max_gen_size == reserved().byte_size(), "max new size problem?");
// Adjust according to our min and max
new_size = MAX2(MIN2(new_size, _max_gen_size), _min_gen_size);
const size_t current_size = capacity_in_bytes();
if (new_size == current_size) {
// No change requested
return;
}
if (new_size > current_size) {
size_t change_bytes = new_size - current_size;
expand(change_bytes);
} else {
size_t change_bytes = current_size - new_size;
// shrink doesn't grab this lock, expand does. Is that right?
MutexLocker x(ExpandHeap_lock);
shrink(change_bytes);
}
if (PrintAdaptiveSizePolicy) {
gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
"collection: %d "
"(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
heap->total_collections(),
size_before, _virtual_space.committed_size());
}
}
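The heart of resize() is a three-step size computation: take used bytes plus the desired free space, align the sum up to the generation alignment, then clamp it to [_min_gen_size, _max_gen_size]. Below is a minimal standalone sketch of that arithmetic, assuming a power-of-two alignment and made-up sizes; align_size_up() here is a hypothetical stand-in for HotSpot's helper, and std::min/std::max replace the MIN2/MAX2 macros.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical align_size_up(): round 'size' up to a multiple of
// 'alignment', which must be a power of two.
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t alignment    = 64 * 1024;         // 64 KB (assumed)
  const size_t min_gen_size = 1 * 1024 * 1024;   // 1 MB  (assumed)
  const size_t max_gen_size = 64 * 1024 * 1024;  // 64 MB (assumed)

  const size_t used_in_bytes      = 5 * 1024 * 1024;
  const size_t desired_free_space = 300 * 1024;

  // used + desired free, aligned up, then clamped to [min, max]
  size_t new_size = align_size_up(used_in_bytes + desired_free_space, alignment);
  new_size = std::max(std::min(new_size, max_gen_size), min_gen_size);
  std::printf("new committed size: %zu bytes\n", new_size);  // prints 5570560
  return 0;
}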
Example 2: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
elapsedTimer scavenge_time;
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
scavenge_entry.update();
if (GC_locker::check_active_before_gc()) {
return false;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
return false;
}
bool promotion_failure_occurred = false;
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
heap->increment_total_collections();
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if ((gc_cause != GCCause::_java_lang_system_gc) ||
UseAdaptiveSizePolicyWithSystemGC) {
// Gather the feedback data for eden occupancy.
young_gen->eden_space()->accumulate_statistics();
}
// We need to track unique scavenge invocations as well.
_total_invocations++;
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
assert(promotion_failed() == false, "Sanity");
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
ResourceMark rm;
HandleMark hm;
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */);
if (TraceGen0Time) scavenge_time.start();
// Let the size policy know we're starting
size_policy->minor_collection_begin();
// Verify the object start arrays.
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
}
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
CardTableExtension::verify_all_young_refs_imprecise();
}
if (!ScavengeWithObjectsInToSpace) {
assert(young_gen->to_space()->is_empty(),
"Attempt to scavenge with live objects in to_space");
young_gen->to_space()->clear();
} else if (ZapUnusedHeapArea) {
young_gen->to_space()->mangle_unused_area();
}
save_to_space_top_before_gc();
//......... part of the code omitted here .........
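A pattern worth noting here (and in the later examples) is how total_collections() gates expensive debug work: pre-GC verification runs only once the collection counter reaches VerifyGCStartAt, so the earliest collections stay cheap. A simplified sketch of that gate, with plain variables standing in for the HotSpot flags of the same names:

#include <cstdio>

// Stand-ins for the HotSpot flags of the same names (assumed values).
static bool     VerifyBeforeGC  = true;
static unsigned VerifyGCStartAt = 2;  // skip verification for collections 0 and 1

// Run pre-GC verification only once enough collections have happened.
void maybe_verify_before_gc(unsigned total_collections) {
  if (VerifyBeforeGC && total_collections >= VerifyGCStartAt) {
    std::printf(" VerifyBeforeGC: (collection %u)\n", total_collections);
    // ... full heap verification would run here ...
  }
}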
Example 3: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
if (GC_locker::check_active_before_gc()) {
return;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Increment the invocation count
heap->increment_total_collections(true /* full */);
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
// Verify object start arrays
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
}
heap->pre_full_gc_dump();
// Filled in below to track the state of the young gen after the collection.
bool eden_empty;
bool survivors_empty;
bool young_gen_empty;
{
HandleMark hm;
const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
// This is useful for debugging but don't change the output the
// customer sees.
const char* gc_cause_str = "Full GC";
if (is_system_gc && PrintGCDetails) {
gc_cause_str = "Full GC (System)";
}
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */);
if (TraceGen1Time) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->major_collection_begin();
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
CodeCache::gc_prologue();
Threads::gc_prologue();
BiasedLocking::preserve_marks();
// Capture heap size before collection for printing.
size_t prev_used = heap->used();
// Capture perm gen size before collection for sizing.
size_t perm_gen_prev_used = perm_gen->used_in_bytes();
// For PrintGCDetails
size_t old_gen_prev_used = old_gen->used_in_bytes();
size_t young_gen_prev_used = young_gen->used_in_bytes();
allocate_stacks();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
//......... part of the code omitted here .........
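Unlike the minor-collection path in Example 2, this full-GC path calls increment_total_collections(true /* full */), which bumps both the overall collection counter and a separate full-collection counter. A simplified model of that bookkeeping (a toy class, not HotSpot's CollectedHeap):

// Every GC increments total_collections; full GCs additionally
// increment total_full_collections.
class CollectionCounters {
  unsigned _total_collections      = 0;
  unsigned _total_full_collections = 0;
 public:
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      _total_full_collections++;
    }
  }
  unsigned total_collections()      const { return _total_collections; }
  unsigned total_full_collections() const { return _total_full_collections; }
};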
Example 4: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
if (GC_locker::check_active_before_gc()) {
return false;
}
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCause::Cause gc_cause = heap->gc_cause();
_gc_timer->register_gc_start();
_gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
// The scope of casr should end after code that can change
// CollectorPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
// Increment the invocation count
heap->increment_total_collections(true /* full */);
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
heap->print_heap_before_gc();
heap->trace_heap_before_gc(_gc_tracer);
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
Universe::verify(" VerifyBeforeGC:");
}
// Verify object start arrays
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
}
heap->pre_full_gc_dump(_gc_timer);
// Filled in below to track the state of the young gen after the collection.
bool eden_empty;
bool survivors_empty;
bool young_gen_empty;
{
HandleMark hm;
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
if (TraceOldGenTime) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->major_collection_begin();
CodeCache::gc_prologue();
BiasedLocking::preserve_marks();
// Capture heap size before collection for printing.
size_t prev_used = heap->used();
// Capture metadata size before collection for sizing.
size_t metadata_prev_used = MetaspaceAux::used_bytes();
// For PrintGCDetails
size_t old_gen_prev_used = old_gen->used_in_bytes();
size_t young_gen_prev_used = young_gen->used_in_bytes();
allocate_stacks();
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_softrefs);
mark_sweep_phase1(clear_all_softrefs);
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
//......... part of the code omitted here .........
Example 5: compute_new_size
void PSPermGen::compute_new_size(size_t used_before_collection) {
// Update our padded average of objects allocated in perm
// gen between collections.
assert(used_before_collection >= _last_used,
"negative allocation amount since last GC?");
const size_t alloc_since_last_gc = used_before_collection - _last_used;
_avg_size->sample(alloc_since_last_gc);
const size_t current_live = used_in_bytes();
// Stash away the current amount live for the next call to this method.
_last_used = current_live;
// We have different alignment constraints than the rest of the heap.
const size_t alignment = MAX2(MinPermHeapExpansion,
virtual_space()->alignment());
// Compute the desired size:
// The free space is the newly computed padded average,
// so the desired size is what's live + the free space.
size_t desired_size = current_live + (size_t)_avg_size->padded_average();
desired_size = align_size_up(desired_size, alignment);
// ...and no larger or smaller than our max and min allowed.
desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
assert(desired_size <= _max_gen_size, "just checking");
const size_t size_before = _virtual_space->committed_size();
if (desired_size == size_before) {
// no change, we're done
return;
}
{
// We'll be growing or shrinking the heap: in either case,
// we need to hold a lock.
MutexLocker x(ExpandHeap_lock);
if (desired_size > size_before) {
const size_t change_bytes = desired_size - size_before;
const size_t aligned_change_bytes =
align_size_up(change_bytes, alignment);
expand_by(aligned_change_bytes);
} else {
// Shrinking
const size_t change_bytes =
size_before - desired_size;
const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
shrink(aligned_change_bytes);
}
}
// While this code isn't controlled by AdaptiveSizePolicy, it's
// convenient to see all resizing decisions under the same flag.
if (PrintAdaptiveSizePolicy) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
"collection: %d "
"(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
heap->total_collections(),
size_before, _virtual_space->committed_size());
}
}
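The sizing policy in compute_new_size() aims the generation at live data plus a padded average of the allocation seen between collections, which keeps a single unusually large or small GC from whipsawing the committed size. Below is a hedged sketch of that feedback step; PaddedAverage here is a toy exponentially decaying mean with made-up constants, standing in for HotSpot's AdaptivePaddedAverage.

#include <algorithm>
#include <cstddef>

// Toy padded average: exponentially decaying mean times a safety
// multiplier. Both constants are assumptions, not HotSpot's values.
struct PaddedAverage {
  double avg     = 0.0;
  double weight  = 0.25;  // smoothing factor (assumed)
  double padding = 1.5;   // safety multiplier (assumed)
  void sample(double v)         { avg = (1.0 - weight) * avg + weight * v; }
  double padded_average() const { return avg * padding; }
};

// Desired size = live data + padded average of inter-GC allocation,
// aligned up and clamped to [min_gen_size, max_gen_size].
size_t desired_gen_size(size_t current_live, size_t alloc_since_last_gc,
                        PaddedAverage& avg, size_t min_gen_size,
                        size_t max_gen_size, size_t alignment) {
  avg.sample(static_cast<double>(alloc_since_last_gc));
  size_t desired = current_live + static_cast<size_t>(avg.padded_average());
  desired = (desired + alignment - 1) & ~(alignment - 1);  // align up
  return std::max(std::min(desired, max_gen_size), min_gen_size);
}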
Example 6: resize_spaces
//......... part of the code omitted here .........
// Don't let eden shrink down to 0 or less.
eden_end = MAX2(eden_end, eden_start + alignment);
to_start = MAX2(to_start, eden_end);
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" [eden_start .. eden_end): "
"[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
eden_start,
eden_end,
pointer_delta(eden_end, eden_start, sizeof(char)));
gclog_or_tty->print_cr(" [ to_start .. to_end): "
"[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
to_start,
to_end,
pointer_delta( to_end, to_start, sizeof(char)));
gclog_or_tty->print_cr(" [from_start .. from_end): "
"[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
from_start,
from_end,
pointer_delta(from_end, from_start, sizeof(char)));
}
}
guarantee((HeapWord*)from_start <= from_space()->bottom(),
"from start moved to the right");
guarantee((HeapWord*)from_end >= from_space()->top(),
"from end moved into live data");
assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
assert(is_object_aligned((intptr_t)from_start), "checking alignment");
assert(is_object_aligned((intptr_t)to_start), "checking alignment");
MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
// Let's make sure the call to initialize doesn't reset "top"!
HeapWord* old_from_top = from_space()->top();
// For PrintAdaptiveSizePolicy block below
size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes();
if (ZapUnusedHeapArea) {
// NUMA is a special case because a numa space is not mangled
// in order to not prematurely bind its address to memory to
// the wrong memory (i.e., don't want the GC thread to first
// touch the memory). The survivor spaces are not numa
// spaces and are mangled.
if (UseNUMA) {
if (eden_from_to_order) {
mangle_survivors(from_space(), fromMR, to_space(), toMR);
} else {
mangle_survivors(to_space(), toMR, from_space(), fromMR);
}
}
// If not mangling the spaces, do some checking to verify that
// the spaces are already mangled.
// The spaces should be correctly mangled at this point so
// do some checking here. Note that they are not being mangled
// in the calls to initialize().
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into an area
// covered by another space and a failure of the check may
// not correctly indicate which space is not properly mangled.
HeapWord* limit = (HeapWord*) virtual_space()->high();
eden_space()->check_mangled_unused_area(limit);
from_space()->check_mangled_unused_area(limit);
to_space()->check_mangled_unused_area(limit);
}
// When an existing space is being initialized, it is not
// mangled because the space has been previously mangled.
eden_space()->initialize(edenMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
to_space()->initialize(toMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
from_space()->initialize(fromMR,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);
assert(from_space()->top() == old_from_top, "from top changed!");
if (PrintAdaptiveSizePolicy) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
"collection: %d "
"(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
"(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
heap->total_collections(),
old_from, old_to,
from_space()->capacity_in_bytes(),
to_space()->capacity_in_bytes());
gclog_or_tty->cr();
}
}
Example 7: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(_preserved_mark_stack.is_empty(), "should be empty");
assert(_preserved_oop_stack.is_empty(), "should be empty");
_gc_timer.register_gc_start();
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
scavenge_entry.update();
if (GC_locker::check_active_before_gc()) {
return false;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
return false;
}
_gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
bool promotion_failure_occurred = false;
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
heap->increment_total_collections();
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if ((gc_cause != GCCause::_java_lang_system_gc) ||
UseAdaptiveSizePolicyWithSystemGC) {
// Gather the feedback data for eden occupancy.
young_gen->eden_space()->accumulate_statistics();
}
if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
}
heap->print_heap_before_gc();
heap->trace_heap_before_gc(&_gc_tracer);
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
Universe::verify(" VerifyBeforeGC:");
}
{
ResourceMark rm;
HandleMark hm;
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);
if (TraceGen0Time) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->minor_collection_begin();
// Verify the object start arrays.
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
}
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
CardTableExtension::verify_all_young_refs_imprecise();
}
if (!ScavengeWithObjectsInToSpace) {
assert(young_gen->to_space()->is_empty(),
"Attempt to scavenge with live objects in to_space");
young_gen->to_space()->clear(SpaceDecorator::Mangle);
//......... part of the code omitted here .........
Example 8: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
if (GC_locker::is_active()) return;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Increment the invocation count
heap->increment_total_collections();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
Universe::print();
}
// Fill in TLABs
heap->ensure_parseability();
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
HandleMark hm;
TraceTime t1("Full GC", PrintGC, true, gclog_or_tty);
TraceCollectorStats tcs(counters());
if (TraceGen1Time) accumulated_time()->start();
// Let the size policy know we're starting
AdaptiveSizePolicy* size_policy = heap->size_policy();
size_policy->major_collection_begin();
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
NOT_CORE(CodeCache::gc_prologue());
Threads::gc_prologue();
// Capture heap size before collection for printing.
size_t prev_used = heap->used();
// Capture perm gen size before collection for sizing.
size_t perm_gen_prev_used = perm_gen->used_in_bytes();
bool marked_for_unloading = false;
allocate_stacks();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_ONLY(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_ONLY(DerivedPointerTable::set_active(false));
mark_sweep_phase3();
mark_sweep_phase4();
restore_marks();
deallocate_stacks();
// "free at last gc" is calculated from these.
Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
Universe::set_heap_used_at_last_gc(Universe::heap()->used());
bool all_empty = young_gen->eden_space()->is_empty() &&
young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
BarrierSet* bs = heap->barrier_set();
if (bs->is_a(BarrierSet::ModRef)) {
ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
MemRegion old_mr = heap->old_gen()->reserved();
MemRegion perm_mr = heap->perm_gen()->reserved();
assert(old_mr.end() <= perm_mr.start(), "Generations out of order");
if (all_empty) {
modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
} else {
modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
//......... part of the code omitted here .........
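The branch at the end of this example encodes a useful invariant: if eden and both survivor spaces are empty after the full GC, no old-to-young references can exist, so the card table covering the old and perm generations can simply be wiped clean; otherwise every card must be dirtied so the next scavenge rescans it. A toy model of that decision (not HotSpot's ModRefBarrierSet):

#include <cstdint>
#include <vector>

// Toy card table: one byte per card, either clean or dirty.
struct CardTableSketch {
  static constexpr uint8_t clean_card = 0xff;
  static constexpr uint8_t dirty_card = 0x00;
  std::vector<uint8_t> cards;
  explicit CardTableSketch(size_t n) : cards(n, clean_card) {}
  void clear()      { cards.assign(cards.size(), clean_card); }
  void invalidate() { cards.assign(cards.size(), dirty_card); }
};

// After a full GC: an empty young gen means no old->young pointers
// remain, so clearing is safe; otherwise conservatively dirty all cards.
void update_cards_after_full_gc(CardTableSketch& ct, bool young_gen_empty) {
  if (young_gen_empty) {
    ct.clear();
  } else {
    ct.invalidate();
  }
}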
Example 9: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
scavenge_entry.update();
if (GC_locker::is_active()) return;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
return;
}
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
AdaptiveSizePolicy* size_policy = heap->size_policy();
heap->increment_total_collections();
if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
Universe::print();
}
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
assert(promotion_failed() == false, "Sanity");
// Fill in TLABs
heap->ensure_parseability();
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
ResourceMark rm;
HandleMark hm;
TraceTime t1("GC", PrintGC, true, gclog_or_tty);
TraceCollectorStats tcs(counters());
if (TraceGen0Time) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->minor_collection_begin();
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
CardTableExtension::verify_all_young_refs_imprecise();
}
assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space");
young_gen->to_space()->clear();
NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
COMPILER2_ONLY(DerivedPointerTable::clear(););
reference_processor()->enable_discovery();
// We track how much was promoted to the next generation for
// the AdaptiveSizePolicy.
size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();
// Reset our survivor overflow.
set_survivor_overflow(false);
// We need to save the old/perm top values before
// creating the promotion_manager. We pass the top
// values to the card_table, to prevent it from
// straying into the promotion labs.
HeapWord* old_top = old_gen->object_space()->top();
HeapWord* perm_top = perm_gen->object_space()->top();
// Release all previously held resources
gc_task_manager()->release_all_resources();
PSPromotionManager::pre_scavenge();
// We'll use the promotion manager again later.
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{
// TraceTime("Roots");
GCTaskQueue* q = GCTaskQueue::create();
//......... part of the code omitted here .........