本文整理汇总了C++中ParallelScavengeHeap::barrier_set方法的典型用法代码示例。如果您正苦于以下问题:C++ ParallelScavengeHeap::barrier_set方法的具体用法?C++ ParallelScavengeHeap::barrier_set怎么用?C++ ParallelScavengeHeap::barrier_set使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ParallelScavengeHeap的用法示例。
在下文中一共展示了ParallelScavengeHeap::barrier_set方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: CheckForUnmarkedObjects
CheckForUnmarkedObjects() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_young_gen = heap->young_gen();
_card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
// No point in asserting barrier set type here. Need to make CardTableExtension
// a unique barrier set type.
}
示例2: CheckForUnmarkedObjects
CheckForUnmarkedObjects() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
_young_gen = heap->young_gen();
_card_table = (CardTableExtension*)heap->barrier_set();
// No point in asserting barrier set type here. Need to make CardTableExtension
// a unique barrier set type.
}
示例3: invoke_no_policy
//.........这里部分代码省略.........
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
mark_sweep_phase3();
mark_sweep_phase4();
restore_marks();
deallocate_stacks();
if (ZapUnusedHeapArea) {
// Do a complete mangle (top to end) because the usage for
// scratch does not maintain a top pointer.
young_gen->to_space()->mangle_unused_area_complete();
}
eden_empty = young_gen->eden_space()->is_empty();
if (!eden_empty) {
eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
}
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();
survivors_empty = young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;
BarrierSet* bs = heap->barrier_set();
if (bs->is_a(BarrierSet::ModRef)) {
ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
MemRegion old_mr = heap->old_gen()->reserved();
MemRegion perm_mr = heap->perm_gen()->reserved();
assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
if (young_gen_empty) {
modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
} else {
modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
}
}
BiasedLocking::restore_marks();
Threads::gc_epilogue();
CodeCache::gc_epilogue();
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
ref_processor()->enqueue_discovered_references(NULL);
// Update time of last GC
reset_millis_since_last_gc();
// Let the size policy know we're done
size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
if (UseAdaptiveSizePolicy) {
if (PrintAdaptiveSizePolicy) {
gclog_or_tty->print("AdaptiveSizeStart: ");
gclog_or_tty->stamp();
示例4: invoke_no_policy
//.........这里部分代码省略.........
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
mark_sweep_phase3();
mark_sweep_phase4();
restore_marks();
deallocate_stacks();
if (ZapUnusedHeapArea) {
// Do a complete mangle (top to end) because the usage for
// scratch does not maintain a top pointer.
young_gen->to_space()->mangle_unused_area_complete();
}
eden_empty = young_gen->eden_space()->is_empty();
if (!eden_empty) {
eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
}
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();
survivors_empty = young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;
ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
MemRegion old_mr = heap->old_gen()->reserved();
if (young_gen_empty) {
modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
} else {
modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
}
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
MetaspaceAux::verify_metrics();
BiasedLocking::restore_marks();
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
ref_processor()->enqueue_discovered_references(NULL);
// Update time of last GC
reset_millis_since_last_gc();
// Let the size policy know we're done
size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
if (UseAdaptiveSizePolicy) {
if (PrintAdaptiveSizePolicy) {
gclog_or_tty->print("AdaptiveSizeStart: ");
gclog_or_tty->stamp();
gclog_or_tty->print_cr(" collection: %d ",
heap->total_collections());
示例5: invoke_no_policy
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
if (GC_locker::is_active()) return;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen();
// Increment the invocation count
heap->increment_total_collections();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
Universe::print();
}
// Fill in TLABs
heap->ensure_parseability();
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
tty->print(" VerifyBeforeGC:");
Universe::verify(true);
}
{
HandleMark hm;
TraceTime t1("Full GC", PrintGC, true, gclog_or_tty);
TraceCollectorStats tcs(counters());
if (TraceGen1Time) accumulated_time()->start();
// Let the size policy know we're starting
AdaptiveSizePolicy* size_policy = heap->size_policy();
size_policy->major_collection_begin();
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
NOT_CORE(CodeCache::gc_prologue());
Threads::gc_prologue();
// Capture heap size before collection for printing.
size_t prev_used = heap->used();
// Capture perm gen size before collection for sizing.
size_t perm_gen_prev_used = perm_gen->used_in_bytes();
bool marked_for_unloading = false;
allocate_stacks();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_ONLY(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_ONLY(DerivedPointerTable::set_active(false));
mark_sweep_phase3();
mark_sweep_phase4();
restore_marks();
deallocate_stacks();
// "free at last gc" is calculated from these.
Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
Universe::set_heap_used_at_last_gc(Universe::heap()->used());
bool all_empty = young_gen->eden_space()->is_empty() &&
young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
BarrierSet* bs = heap->barrier_set();
if (bs->is_a(BarrierSet::ModRef)) {
ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
MemRegion old_mr = heap->old_gen()->reserved();
MemRegion perm_mr = heap->perm_gen()->reserved();
assert(old_mr.end() <= perm_mr.start(), "Generations out of order");
if (all_empty) {
modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
} else {
modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
//.........这里部分代码省略.........