This article collects typical usage examples of the C++ method ParallelScavengeHeap::perm_gen. If you are wondering how ParallelScavengeHeap::perm_gen is used in C++, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the containing class, ParallelScavengeHeap.
The following presents 12 code examples of the ParallelScavengeHeap::perm_gen method, sorted by popularity by default.
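Before the individual examples, it is worth spelling out the access pattern they all share: the global heap is obtained via Universe::heap(), cast to ParallelScavengeHeap, checked with an assert on the heap kind, and only then is perm_gen() called alongside the other generation accessors. The condensed sketch below illustrates that pattern; it is distilled from the examples rather than taken from the HotSpot sources, and the helper name report_perm_gen_usage is hypothetical.
// Illustrative helper (not from the HotSpot sources): obtain the
// ParallelScavengeHeap and query its permanent generation.
static void report_perm_gen_usage() {
  // Universe::heap() returns the single CollectedHeap; under the parallel
  // scavenge collector it is safe to cast after checking the kind.
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  // perm_gen() returns the PSPermGen*, next to young_gen() and old_gen().
  PSPermGen* perm_gen = heap->perm_gen();
  // used_in_bytes() is the same accessor the full-GC examples use for sizing.
  gclog_or_tty->print_cr("perm gen used: " SIZE_FORMAT " bytes",
                         perm_gen->used_in_bytes());
}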
Example 1: mark_sweep_phase2
void PSMarkSweep::mark_sweep_phase2() {
EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
trace("2");
// Now all live objects are marked, compute the new object addresses.
// It is imperative that we traverse perm_gen LAST. If dead space is
// allowed, a range of dead objects may get overwritten by a dead int
// array. If perm_gen is not traversed last a klassOop may get
// overwritten. This is fine since it is dead, but if the class has dead
// instances we have to skip them, and in order to find their size we
// need the klassOop!
//
// It is not required that we traverse spaces in the same order in
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Begin compacting into the old gen
PSMarkSweepDecorator::set_destination_decorator_tenured();
// This will also compact the young gen spaces.
old_gen->precompact();
// Compact the perm gen into the perm gen
PSMarkSweepDecorator::set_destination_decorator_perm_gen();
perm_gen->precompact();
}
Example 2: start_of_perm_gen
HeapWord* HeapInspection::start_of_perm_gen() {
if (is_shared_heap()) {
SharedHeap* sh = SharedHeap::heap();
return sh->perm_gen()->used_region().start();
}
#ifndef SERIALGC
ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
return psh->perm_gen()->object_space()->used_region().start();
#else
ShouldNotReachHere();
return NULL;
#endif // SERIALGC
}
Example 3: initialize
void PSScavenge::initialize() {
// Arguments must have been parsed
if (AlwaysTenure) {
_tenuring_threshold = 0;
} else if (NeverTenure) {
_tenuring_threshold = markOopDesc::max_age + 1;
} else {
// We want to smooth out our startup times for the AdaptiveSizePolicy
_tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
MaxTenuringThreshold;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Set boundary between young_gen and old_gen
assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
"perm above old");
assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
"old above young");
_young_generation_boundary = young_gen->eden_space()->bottom();
// Initialize ref handling object for scavenging.
MemRegion mr = young_gen->reserved();
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
NULL, // header provides liveness info
false); // next field updates do not need write barrier
// Cache the cardtable
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
_card_table = (CardTableExtension*)bs;
_counters = new CollectorCounters("PSScavenge", 0);
}
Example 4: is_in_old_or_perm
// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
"Must be ParallelScavengeHeap");
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
if (old_gen->is_in(p)) {
return true;
}
if (perm_gen->is_in(p)) {
return true;
}
return false;
}
Example 5: advance_destination_decorator
void PSMarkSweepDecorator::advance_destination_decorator() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
assert(_destination_decorator != NULL, "Sanity");
guarantee(_destination_decorator != heap->perm_gen()->object_mark_sweep(), "Cannot advance perm gen decorator");
PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
if ( _destination_decorator == first ) {
_destination_decorator = second;
} else if ( _destination_decorator == second ) {
_destination_decorator = third;
} else if ( _destination_decorator == third ) {
_destination_decorator = fourth;
} else {
fatal("PSMarkSweep attempting to advance past last compaction area");
}
}
Example 6: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
elapsedTimer scavenge_time;
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
scavenge_entry.update();
if (GC_locker::check_active_before_gc()) {
return false;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
return false;
}
bool promotion_failure_occurred = false;
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
heap->increment_total_collections();
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if ((gc_cause != GCCause::_java_lang_system_gc) ||
UseAdaptiveSizePolicyWithSystemGC) {
// Gather the feedback data for eden occupancy.
young_gen->eden_space()->accumulate_statistics();
}
// We need to track unique scavenge invocations as well.
_total_invocations++;
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
assert(promotion_failed() == false, "Sanity");
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
ResourceMark rm;
HandleMark hm;
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */);
if (TraceGen0Time) scavenge_time.start();
// Let the size policy know we're starting
size_policy->minor_collection_begin();
// Verify the object start arrays.
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
}
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
CardTableExtension::verify_all_young_refs_imprecise();
}
if (!ScavengeWithObjectsInToSpace) {
assert(young_gen->to_space()->is_empty(),
"Attempt to scavenge with live objects in to_space");
young_gen->to_space()->clear();
} else if (ZapUnusedHeapArea) {
young_gen->to_space()->mangle_unused_area();
}
save_to_space_top_before_gc();
//......... part of the code omitted here .........
Example 7: set_destination_decorator_perm_gen
void PSMarkSweepDecorator::set_destination_decorator_perm_gen() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
_destination_decorator = heap->perm_gen()->object_mark_sweep();
}
Example 8: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
if (GC_locker::check_active_before_gc()) {
return;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Increment the invocation count
heap->increment_total_collections(true /* full */);
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
// Verify object start arrays
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
}
heap->pre_full_gc_dump();
// Filled in below to track the state of the young gen after the collection.
bool eden_empty;
bool survivors_empty;
bool young_gen_empty;
{
HandleMark hm;
const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
// This is useful for debugging but don't change the output the
// customer sees.
const char* gc_cause_str = "Full GC";
if (is_system_gc && PrintGCDetails) {
gc_cause_str = "Full GC (System)";
}
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */);
if (TraceGen1Time) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->major_collection_begin();
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
CodeCache::gc_prologue();
Threads::gc_prologue();
BiasedLocking::preserve_marks();
// Capture heap size before collection for printing.
size_t prev_used = heap->used();
// Capture perm gen size before collection for sizing.
size_t perm_gen_prev_used = perm_gen->used_in_bytes();
// For PrintGCDetails
size_t old_gen_prev_used = old_gen->used_in_bytes();
size_t young_gen_prev_used = young_gen->used_in_bytes();
allocate_stacks();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
//......... part of the code omitted here .........
Example 9: heap_inspection
void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
ResourceMark rm;
HeapWord* ref;
CollectedHeap* heap = Universe::heap();
bool is_shared_heap = false;
switch (heap->kind()) {
case CollectedHeap::G1CollectedHeap:
case CollectedHeap::GenCollectedHeap: {
is_shared_heap = true;
SharedHeap* sh = (SharedHeap*)heap;
if (need_prologue) {
sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
}
ref = sh->perm_gen()->used_region().start();
break;
}
#ifndef SERIALGC
case CollectedHeap::ParallelScavengeHeap: {
ParallelScavengeHeap* psh = (ParallelScavengeHeap*)heap;
ref = psh->perm_gen()->object_space()->used_region().start();
break;
}
#endif // SERIALGC
default:
ShouldNotReachHere(); // Unexpected heap kind for this op
}
// Collect klass instance info
KlassInfoTable cit(KlassInfoTable::cit_size, ref);
if (!cit.allocation_failed()) {
// Iterate over objects in the heap
RecordInstanceClosure ric(&cit);
// If this operation encounters a bad object when using CMS,
// consider using safe_object_iterate() which avoids perm gen
// objects that may contain bad references.
Universe::heap()->object_iterate(&ric);
// Report if certain classes are not counted because of
// running out of C-heap for the histogram.
size_t missed_count = ric.missed_count();
if (missed_count != 0) {
st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
" total instances in data below",
missed_count);
}
// Sort and print klass instance info
KlassInfoHisto histo("\n"
" num #instances #bytes class name\n"
"----------------------------------------------",
KlassInfoHisto::histo_initial_size);
HistoClosure hc(&histo);
cit.iterate(&hc);
histo.sort();
histo.print_on(st);
} else {
st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
}
st->flush();
if (need_prologue && is_shared_heap) {
SharedHeap* sh = (SharedHeap*)heap;
sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
}
}
Example 10: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
if (GC_locker::is_active()) return;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Increment the invocation count
heap->increment_total_collections();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
Universe::print();
}
// Fill in TLABs
heap->ensure_parseability();
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
HandleMark hm;
TraceTime t1("Full GC", PrintGC, true, gclog_or_tty);
TraceCollectorStats tcs(counters());
if (TraceGen1Time) accumulated_time()->start();
// Let the size policy know we're starting
AdaptiveSizePolicy* size_policy = heap->size_policy();
size_policy->major_collection_begin();
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
NOT_CORE(CodeCache::gc_prologue());
Threads::gc_prologue();
// Capture heap size before collection for printing.
size_t prev_used = heap->used();
// Capture perm gen size before collection for sizing.
size_t perm_gen_prev_used = perm_gen->used_in_bytes();
bool marked_for_unloading = false;
allocate_stacks();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_ONLY(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_ONLY(DerivedPointerTable::set_active(false));
mark_sweep_phase3();
mark_sweep_phase4();
restore_marks();
deallocate_stacks();
// "free at last gc" is calculated from these.
Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
Universe::set_heap_used_at_last_gc(Universe::heap()->used());
bool all_empty = young_gen->eden_space()->is_empty() &&
young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
BarrierSet* bs = heap->barrier_set();
if (bs->is_a(BarrierSet::ModRef)) {
ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
MemRegion old_mr = heap->old_gen()->reserved();
MemRegion perm_mr = heap->perm_gen()->reserved();
assert(old_mr.end() <= perm_mr.start(), "Generations out of order");
if (all_empty) {
modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
} else {
modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
//......... part of the code omitted here .........
Example 11: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
scavenge_entry.update();
if (GC_locker::is_active()) return;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
return;
}
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
AdaptiveSizePolicy* size_policy = heap->size_policy();
heap->increment_total_collections();
if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
Universe::print();
}
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
assert(promotion_failed() == false, "Sanity");
// Fill in TLABs
heap->ensure_parseability();
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
ResourceMark rm;
HandleMark hm;
TraceTime t1("GC", PrintGC, true, gclog_or_tty);
TraceCollectorStats tcs(counters());
if (TraceGen0Time) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->minor_collection_begin();
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
CardTableExtension::verify_all_young_refs_imprecise();
}
assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space");
young_gen->to_space()->clear();
NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
COMPILER2_ONLY(DerivedPointerTable::clear(););
reference_processor()->enable_discovery();
// We track how much was promoted to the next generation for
// the AdaptiveSizePolicy.
size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();
// Reset our survivor overflow.
set_survivor_overflow(false);
// We need to save the old/perm top values before
// creating the promotion_manager. We pass the top
// values to the card_table, to prevent it from
// straying into the promotion labs.
HeapWord* old_top = old_gen->object_space()->top();
HeapWord* perm_top = perm_gen->object_space()->top();
// Release all previously held resources
gc_task_manager()->release_all_resources();
PSPromotionManager::pre_scavenge();
// We'll use the promotion manager again later.
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{
// TraceTime("Roots");
GCTaskQueue* q = GCTaskQueue::create();
//......... part of the code omitted here .........
Example 12: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(_preserved_mark_stack.is_empty(), "should be empty");
assert(_preserved_oop_stack.is_empty(), "should be empty");
_gc_timer.register_gc_start(os::elapsed_counter());
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
scavenge_entry.update();
if (GC_locker::check_active_before_gc()) {
return false;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
return false;
}
_gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
bool promotion_failure_occurred = false;
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
heap->increment_total_collections();
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if ((gc_cause != GCCause::_java_lang_system_gc) ||
UseAdaptiveSizePolicyWithSystemGC) {
// Gather the feedback data for eden occupancy.
young_gen->eden_space()->accumulate_statistics();
}
if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
}
heap->print_heap_before_gc();
heap->trace_heap_before_gc(&_gc_tracer);
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
heap->ensure_parsability(true); // retire TLABs
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify();
}
{
ResourceMark rm;
HandleMark hm;
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);
if (TraceGen0Time) accumulated_time()->start();
// Let the size policy know we're starting
size_policy->minor_collection_begin();
// Verify the object start arrays.
if (VerifyObjectStartArray &&
VerifyBeforeGC) {
old_gen->verify_object_start_array();
perm_gen->verify_object_start_array();
}
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
CardTableExtension::verify_all_young_refs_imprecise();
}
if (!ScavengeWithObjectsInToSpace) {
//......... part of the code omitted here .........