This article collects typical usage examples of the C++ method GenCollectedHeap::trace_heap_after_gc. If you have been asking yourself how GenCollectedHeap::trace_heap_after_gc works or how to call it, the hand-picked code examples below should help. You may also want to look at usage examples for the enclosing class, GenCollectedHeap.
Three code examples of GenCollectedHeap::trace_heap_after_gc are shown below, sorted by popularity by default.
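All three examples follow the same pairing: trace_heap_before_gc() is called once the tracer and reference processor are wired up, the collection runs, and trace_heap_after_gc() is the last (or nearly last) step. A minimal sketch of that call pattern (HotSpot-internal types; illustrative only, not compilable outside the VM):

// Illustrative skeleton: the work between the two trace calls is
// collector-specific; see the full examples below.
void collect_with_tracing(GenCollectedHeap* gch, GCTracer* tracer) {
  gch->trace_heap_before_gc(tracer);  // snapshot heap usage before the GC
  // ... mark/sweep/scavenge work happens here ...
  gch->trace_heap_after_gc(tracer);   // snapshot heap usage after the GC
}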
Example 1: invoke_at_safepoint
void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions();

  allocate_stacks();

  mark_sweep_phase1(clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated the young generation then we
  // can clear the card table. Otherwise, we must invalidate
  // it (consider all cards dirty). In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  CardTableRS* rs = gch->rem_set();
  Generation* old_gen = gch->old_gen();

  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (gch->young_gen()->used() == 0) {
    // We've evacuated the young generation.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  set_ref_processor(NULL);

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
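A detail worth noting in the closing lines above: because os::javaTimeMillis() does not guarantee monotonicity, the millisecond timestamp is derived from the monotonic os::javaTimeNanos() instead. The same idea in portable, standalone C++ (a sketch using std::chrono, not HotSpot code):

#include <chrono>
#include <cstdint>

// steady_clock is guaranteed monotonic, unlike system_clock (the analogue
// of os::javaTimeMillis()), so timestamps derived from it never run
// backwards and cannot trigger "time-warp" warnings.
int64_t monotonic_millis() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(
      steady_clock::now().time_since_epoch()).count();
}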
Example 2: collect
//......... (part of the code is omitted here) .........
                                   &fsc_with_no_gc_barrier,
                                   &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();  // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
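On the success path above, eden and from-space are cleared and swap_spaces() then exchanges the roles of the two survivor spaces, which is why the code can immediately assert that to() is empty. A toy illustration of that role swap (hypothetical stand-in types, not HotSpot code):

#include <cassert>
#include <cstddef>
#include <utility>

// Stand-in for a survivor space; only the used-bytes count matters here.
struct Space { size_t used; };

struct YoungGenSketch {
  Space* from_space;  // cleared just before the swap
  Space* to_space;    // holds the survivors copied during the scavenge

  // Exchange the roles: the survivors become the new from-space, and the
  // freshly cleared space becomes the empty target of the next scavenge.
  void swap_spaces() {
    std::swap(from_space, to_space);
    assert(to_space->used == 0 && "to-space should be empty after the swap");
  }
};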
Example 3: invoke_at_safepoint
//......... (part of the code is omitted here) .........
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture heap size before collection for printing.
  size_t gch_prev_used = gch->used();

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions(level);

  allocate_stacks();

  mark_sweep_phase1(level, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3(level);

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated all generations younger than this
  // one, then we can clear the card table. Otherwise, we must invalidate
  // it (consider all cards dirty). In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  bool all_empty = true;
  for (int i = 0; all_empty && i < level; i++) {
    Generation* g = gch->get_gen(i);
    all_empty = all_empty && g->used() == 0;
  }
  GenRemSet* rs = gch->rem_set();
  Generation* old_gen = gch->get_gen(level);

  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (all_empty) {
    // We've evacuated all generations below us.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  // refs processing: clean slate
  _ref_processor = NULL;

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
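For context on how this entry point is reached: the generational heap's full-collection path calls invoke_at_safepoint() only after the VM has stopped at a safepoint, handing it the old generation's reference processor. A hedged sketch of such a call site (the wiring below is an assumption for illustration, not code from the examples above):

// Hypothetical call-site sketch; names mirror Example 1. In the real VM,
// the old generation's collect() performs this handoff.
void run_full_gc_sketch(GenCollectedHeap* gch, bool clear_all_softrefs) {
  // invoke_at_safepoint() asserts SafepointSynchronize::is_at_safepoint(),
  // so the caller must already have brought the VM to a safepoint.
  ReferenceProcessor* rp = gch->old_gen()->ref_processor();
  GenMarkSweep::invoke_at_safepoint(rp, clear_all_softrefs);
}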