This article collects typical usage examples of the C++ ParallelScavengeHeap class. If you are wondering what ParallelScavengeHeap does, how to use it, or want to see it in real code, the curated class examples below may help.
Below are 15 code examples of the ParallelScavengeHeap class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: assert
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
  // now until we investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}
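Note how the sizing logic above first measures the free region of to_space in bytes (pointer_delta with a jbyte element size) and only then divides by sizeof(PreservedMark) to get a record count. A minimal standalone sketch of that two-step pattern, with a hypothetical record type in place of the HotSpot one:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for HotSpot's PreservedMark record.
struct Record {
  void*  obj;
  size_t mark;
};

// How many Records fit between top and end? Measure bytes first, then divide.
size_t records_that_fit(const char* top, const char* end) {
  size_t bytes = static_cast<size_t>(end - top);  // size in bytes first
  return bytes / sizeof(Record);                  // now divide by the record size
}

int main() {
  char buffer[1024];
  std::printf("%zu records fit\n", records_that_fit(buffer, buffer + sizeof(buffer)));
  return 0;
}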
Example 2: assert
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection. The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    int count = (maximum_heap_compaction) ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}
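IntFlagSetting above is a scope guard: it overrides MarkSweepAlwaysCompactCount for the duration of the call and restores the previous value on scope exit. A minimal sketch of that RAII idiom, assuming nothing from HotSpot (the class name here is hypothetical):

#include <cstdio>

// RAII guard: override an int flag for the current scope, restore on exit.
class ScopedIntSetting {
  int& _flag;
  int  _saved;
public:
  ScopedIntSetting(int& flag, int value) : _flag(flag), _saved(flag) { flag = value; }
  ~ScopedIntSetting() { _flag = _saved; }
};

int g_compact_count = 4;

int main() {
  {
    ScopedIntSetting guard(g_compact_count, 1);              // force compaction every cycle
    std::printf("inside scope: %d\n", g_compact_count);      // prints 1
  }
  std::printf("after scope: %d\n", g_compact_count);         // prints 4 again
  return 0;
}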
Example 3: doit
void VM_GC_RegularScavenge::doit() {
  HandleMark hm;
  ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
  psh->ensure_parsability(false);  // must happen, even if collection does
                                   // not happen (e.g. due to GC_locker)
  psh->young_collect_as_vm_thread(GCCause::_regular_scavenge);
}
Example 4: CheckForUnmarkedObjects
CheckForUnmarkedObjects() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _young_gen = heap->young_gen();
  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
  // No point in asserting barrier set type here. Need to make CardTableExtension
  // a unique barrier set type.
}
Example 5: m
void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();
  perm_gen->precompact();
}
Example 6: assert
// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
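The restore loop above pops matched pairs from two parallel stacks, one holding objects and one holding their saved mark words, and writes each mark back. A standalone sketch of that pairing pattern, with hypothetical types in place of oop/markOop:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical object with a header word that GC may overwrite.
struct Object {
  uintptr_t mark;
};

int main() {
  Object a{0x1}, b{0x5};
  std::vector<Object*>   preserved_objs;
  std::vector<uintptr_t> preserved_marks;

  // Preserve: save the original marks before scribbling over them.
  preserved_objs.push_back(&a); preserved_marks.push_back(a.mark);
  preserved_objs.push_back(&b); preserved_marks.push_back(b.mark);
  a.mark = b.mark = 0xdead;  // simulate GC storing forwarding info in the header

  // Restore: pop matched pairs and write the saved marks back.
  while (!preserved_objs.empty()) {
    Object*   obj  = preserved_objs.back();  preserved_objs.pop_back();
    uintptr_t mark = preserved_marks.back(); preserved_marks.pop_back();
    obj->mark = mark;
  }
  std::printf("a=%#lx b=%#lx\n", (unsigned long)a.mark, (unsigned long)b.mark);
  return 0;
}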
Example 7: PSKeepAliveClosure
PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  _to_space = heap->young_gen()->to_space();

  assert(_promotion_manager != NULL, "Sanity");
}
Example 8: available_to_live
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->young_gen_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
         "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
                                          space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
           "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}
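Both this method and available_for_expansion in Example 15 round a byte count down to an alignment boundary before returning it. For power-of-two alignments that is a single mask operation; a standalone sketch (align_down is a hypothetical helper, not the HotSpot align_size_down itself):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Round size down to a multiple of alignment (alignment must be a power of two).
size_t align_down(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return size & ~(alignment - 1);
}

int main() {
  std::printf("%zu\n", align_down(10000, 4096));  // 8192
  std::printf("%zu\n", align_down(8192, 4096));   // 8192 (already aligned)
  return 0;
}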
Example 9: tm
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
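Every oops_do call in the strong-roots block above applies the same closure (a virtual visitor) to a different set of root pointers. A minimal standalone sketch of that closure idiom, with hypothetical types in place of the HotSpot ones:

#include <cstdio>
#include <vector>

struct Object { bool marked = false; };

// Abstract visitor applied to every root pointer, HotSpot-closure style.
class OopClosure {
public:
  virtual ~OopClosure() = default;
  virtual void do_oop(Object** p) = 0;
};

class MarkClosure : public OopClosure {
public:
  void do_oop(Object** p) override {
    if (*p != nullptr) (*p)->marked = true;
  }
};

// A "root set" that knows how to enumerate itself into a closure.
struct Roots {
  std::vector<Object*> handles;
  void oops_do(OopClosure* cl) {
    for (Object*& h : handles) cl->do_oop(&h);
  }
};

int main() {
  Object a, b;
  Roots roots{{&a, &b}};
  MarkClosure mark;
  roots.oops_do(&mark);  // every root visited by the same closure
  std::printf("a=%d b=%d\n", a.marked, b.marked);
  return 0;
}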
Example 10: m
void PSMarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  Universe::oops_do(mark_and_push_closure());
  JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
  Threads::oops_do(mark_and_push_closure());
  ObjectSynchronizer::oops_do(mark_and_push_closure());
  FlatProfiler::oops_do(mark_and_push_closure());
  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
  guarantee(!jvmdi::enabled(), "Should not be used with jvmdi");
  vmSymbols::oops_do(mark_and_push_closure());

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  ReferencePolicy* soft_ref_policy;
  if (clear_all_softrefs) {
    soft_ref_policy = new AlwaysClearPolicy();
  } else {
    NOT_COMPILER2(soft_ref_policy = new LRUCurrentHeapPolicy();)
    COMPILER2_ONLY(soft_ref_policy = new LRUMaxHeapPolicy();)
  }
Example 11: should_scavenge
template <class T>
inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
  if (check_to_space) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    return should_scavenge(p, heap->young_gen()->to_space());
  }
  return should_scavenge(p);
}
Example 12: assert
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads + 1, mtGC);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}
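The manager array above holds one entry per GC worker plus a dedicated slot at index ParallelGCThreads for the VMThread, which is excluded from work stealing. A standalone sketch of that one-extra-slot layout, using hypothetical types:

#include <cstdio>
#include <vector>

struct Manager { bool steals_work; };

int main() {
  const unsigned workers = 4;
  // One manager per worker, plus one extra slot for the coordinating thread.
  std::vector<Manager> managers(workers + 1);
  for (unsigned i = 0; i < workers; i++) managers[i].steals_work = true;
  managers[workers].steals_work = false;  // dedicated slot: no work stealing

  std::printf("manager %u steals: %d\n", workers, managers[workers].steals_work);
  return 0;
}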
Example 13: assert
void PSYoungGen::compute_initial_space_boundaries() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Compute sizes
  size_t alignment = heap->intra_heap_alignment();
  size_t size = virtual_space()->committed_size();

  size_t survivor_size = size / InitialSurvivorRatio;
  survivor_size = align_size_down(survivor_size, alignment);
  // ... but never less than an alignment
  survivor_size = MAX2(survivor_size, alignment);

  // Young generation is eden + 2 survivor spaces
  size_t eden_size = size - (2 * survivor_size);

  // Now go ahead and set 'em.
  set_space_boundaries(eden_size, survivor_size);
  space_invariants();

  if (UsePerfData) {
    _eden_counters->update_capacity();
    _from_counters->update_capacity();
    _to_counters->update_capacity();
  }
}
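The sizing above divides the committed young gen size by InitialSurvivorRatio for each survivor space, aligns the result down, enforces a floor of one alignment unit, and gives the remainder to eden. A worked standalone sketch with illustrative numbers (not HotSpot defaults; align_down is the same hypothetical helper as in the sketch after Example 8):

#include <algorithm>
#include <cstddef>
#include <cstdio>

size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);  // alignment assumed to be a power of two
}

int main() {
  const size_t committed = 64 * 1024 * 1024;  // 64 MB young gen (illustrative)
  const size_t alignment = 64 * 1024;         // 64 KB space alignment (illustrative)
  const size_t survivor_ratio = 8;

  size_t survivor = align_down(committed / survivor_ratio, alignment);
  survivor = std::max(survivor, alignment);   // ... but never less than an alignment
  size_t eden = committed - 2 * survivor;     // young gen = eden + two survivor spaces

  std::printf("eden=%zu survivor=%zu\n", eden, survivor);
  return 0;
}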
Example 14: tm
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
Example 15: assert
size_t ASPSOldGen::available_for_expansion() {
  assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result = gen_size_limit() - virtual_space()->committed_size();
  size_t result_aligned = align_size_down(result, heap->old_gen_alignment());
  return result_aligned;
}
}