This page collects typical usage examples of the C++ method ParallelScavengeHeap::size_policy. If you are unsure what ParallelScavengeHeap::size_policy does, how to call it, or where it is used in practice, the curated code examples below should help. You can also explore further usage examples of the containing class, ParallelScavengeHeap.
13 code examples of ParallelScavengeHeap::size_policy are shown below, ordered by popularity.
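All of the examples share one call pattern: fetch the ParallelScavengeHeap, ask it for its PSAdaptiveSizePolicy, and consult the policy before committing to a collection. The sketch below distills that pattern; it is HotSpot-internal code, compilable only inside the JDK source tree, and the surrounding policy check is illustrative rather than a complete GC entry point.

// Recurring pattern (HotSpot-internal APIs; see the examples below):
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
PSAdaptiveSizePolicy* policy = heap->size_policy();
// Consult the policy before doing anything expensive, e.g.:
if (!policy->gc_time_limit_exceeded()) {
  // ... proceed with the collection attempt (see Example 1) ...
}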
Example 1: invoke
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection. The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}
Example 2: invoke
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  if (UsePerfData) {
    counters->update_full_follows_scavenge(0);
  }
  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    if (UsePerfData) {
      counters->update_full_follows_scavenge(full_follows_scavenge);
    }
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }
}
Example 3: should_attempt_scavenge
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT,
                             young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}
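The decision above boils down to a little arithmetic: take the policy's padded average of bytes promoted per scavenge, cap it by what is actually live in the young generation, and attempt the scavenge only if that estimate fits in the old generation's free space. The standalone sketch below models just that test with plain integers; model_should_attempt_scavenge and the sample sizes are hypothetical, not HotSpot API, but the predicate mirrors Example 3.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Standalone model of Example 3's feasibility test: a scavenge is
// worthwhile only if the expected promotion volume fits in the old
// generation's free space.
static bool model_should_attempt_scavenge(std::size_t padded_avg_promoted,
                                          std::size_t young_used,
                                          std::size_t old_free) {
  std::size_t promotion_estimate = std::min(padded_avg_promoted, young_used);
  return promotion_estimate < old_free;
}

int main() {
  const std::size_t M = 1024 * 1024;
  // Hypothetical numbers: 8 MB expected promotion, 32 MB live in the
  // young gen, only 6 MB free in the old gen -> the scavenge is skipped
  // (promoted_too_large) and a full GC is considered instead.
  bool attempt = model_should_attempt_scavenge(8 * M, 32 * M, 6 * M);
  std::printf("attempt scavenge: %s\n", attempt ? "yes" : "no");
  return 0;
}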
Example 4: available_for_contraction
size_t ASPSOldGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t gen_alignment = heap->old_gen_alignment();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  const size_t working_size =
      used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
  const size_t working_aligned = align_size_up(working_size, gen_alignment);
  const size_t working_or_min = MAX2(working_aligned, min_gen_size());
  if (working_or_min > reserved().byte_size()) {
    // If the used or minimum gen size (aligned up) is greater
    // than the total reserved size, then the space available
    // for contraction should (after proper alignment) be 0
    return 0;
  }
  const size_t max_contraction =
      reserved().byte_size() - working_or_min;

  // Use the "increment" fraction instead of the "decrement" fraction
  // to allow the other gen to expand more aggressively. The
  // "decrement" fraction is conservative because its intent is to
  // only reduce the footprint.
  size_t result = policy->promo_increment_aligned_down(max_contraction);
  // Also adjust for inter-generational alignment
  size_t result_aligned = align_size_down(result, gen_alignment);

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
                           " %d K / 0x%x", result_aligned/K, result_aligned);
    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
                           reserved().byte_size()/K, reserved().byte_size());
    size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
                           working_promoted/K, working_promoted);
    gclog_or_tty->print_cr(" used %d K / 0x%x",
                           used_in_bytes()/K, used_in_bytes());
    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
                           min_gen_size()/K, min_gen_size());
    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
                           max_contraction/K, max_contraction);
    gclog_or_tty->print_cr(" without alignment %d K / 0x%x",
                           policy->promo_increment(max_contraction)/K,
                           policy->promo_increment(max_contraction));
    gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
  }

  assert(result_aligned <= max_contraction, "arithmetic is wrong");
  return result_aligned;
}
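Example 4 chains several alignment steps whose order matters: reserve room for current use plus the expected next promotion (aligned up), never go below the minimum generation size, and then surrender only the policy's "increment" fraction of whatever is left (aligned down). The standalone sketch below walks those steps with plain integers; the helper names, the fixed 20% fraction standing in for promo_increment_aligned_down, and the sample sizes are all hypothetical.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// a must be a power of two, as generation alignments are in HotSpot.
static std::size_t align_up(std::size_t v, std::size_t a)   { return (v + a - 1) & ~(a - 1); }
static std::size_t align_down(std::size_t v, std::size_t a) { return v & ~(a - 1); }

int main() {
  const std::size_t M = 1024 * 1024;
  const std::size_t gen_alignment       = 64 * 1024; // hypothetical
  const std::size_t reserved            = 512 * M;   // total reserved old-gen space
  const std::size_t used                = 300 * M;   // bytes currently used
  const std::size_t padded_avg_promoted = 20 * M;    // expected next promotion
  const std::size_t min_gen_size        = 64 * M;

  // Keep room for current use plus the expected promotion, but never
  // shrink below the minimum generation size.
  std::size_t working = align_up(used + padded_avg_promoted, gen_alignment);
  std::size_t working_or_min = std::max(working, min_gen_size);
  if (working_or_min > reserved) {
    std::puts("no room to contract");
    return 0;
  }
  std::size_t max_contraction = reserved - working_or_min;

  // Example 4 surrenders only the policy's "increment" fraction of this;
  // a fixed 20% stands in for promo_increment_aligned_down here.
  std::size_t result = align_down(max_contraction / 5, gen_alignment);
  std::printf("available for contraction: %zu K\n", result / 1024);
  return 0;
}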
Example 5: available_for_contraction
// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
           "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
           "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
                             result_aligned/K);
      gclog_or_tty->print_cr(" max_contraction %d K", max_contraction/K);
      gclog_or_tty->print_cr(" eden_avail %d K", eden_avail/K);
      gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
    }
    return result_aligned;
  }
  return 0;
}
Example 6: invoke
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
      heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
Example 7: invoke
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData) {
      counters->update_full_follows_scavenge(0);
    }
    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData) {
        counters->update_full_follows_scavenge(full_follows_scavenge);
      }
      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}
Example 8: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  elapsedTimer scavenge_time;
  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();
  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  // We need to track unique scavenge invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) scavenge_time.start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear();
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }

    save_to_space_top_before_gc();
    //......... (rest of the method omitted) .........
Example 9: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();

    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    //......... (rest of the method omitted) .........
Example 10: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    //......... (rest of the method omitted) .........
Example 11: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
      //......... (rest of the method omitted) .........
Example 12: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::is_active()) return;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
    Universe::print();
  }

  // Fill in TLABs
  heap->ensure_parseability();

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    HandleMark hm;
    TraceTime t1("Full GC", PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    AdaptiveSizePolicy* size_policy = heap->size_policy();
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    NOT_CORE(CodeCache::gc_prologue());
    Threads::gc_prologue();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    bool marked_for_unloading = false;

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_ONLY(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();

    mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_ONLY(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    // "free at last gc" is calculated from these.
    Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
    Universe::set_heap_used_at_last_gc(Universe::heap()->used());

    bool all_empty = young_gen->eden_space()->is_empty() &&
                     young_gen->from_space()->is_empty() &&
                     young_gen->to_space()->is_empty();

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(old_mr.end() <= perm_mr.start(), "Generations out of order");

      if (all_empty) {
        modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
        //......... (rest of the method omitted) .........
Example 13: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::is_active()) return;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return;
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  AdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (PrintHeapAtGC) {
    gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
    Universe::print();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->ensure_parseability();

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceTime t1("GC", PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_ONLY(DerivedPointerTable::clear(););
    reference_processor()->enable_discovery();

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();

    {
      // TraceTime("Roots");
      GCTaskQueue* q = GCTaskQueue::create();
      //......... (rest of the method omitted) .........