This article collects typical usage examples of the C++ method GenCollectedHeap::total_collections. If you are unsure what GenCollectedHeap::total_collections does or how to use it, the curated code examples below may help. You can also explore further usage examples of its enclosing class, GenCollectedHeap.
Five code examples of GenCollectedHeap::total_collections are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: doit
// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" of the do_full_collection call below "forces"
    // a collection; the second arg, GenCollectedHeap::YoungGen,
    // ensures that only the young gen is collected. XXX In the
    // future, we'll probably need to have something in this
    // interface to say do this only if we are sure we will not
    // bail out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
           "We can only be executing this arm of if at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GCLocker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");
  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
  }
}
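The operation only triggers a young collection if no other collection has completed since the request was issued: _gc_count_before is compared against total_collections() at the safepoint. Below is a minimal standalone sketch of that check-the-count-then-act idiom; the counter, lock, and function names are invented for illustration and are not HotSpot APIs.

#include <cstdio>
#include <mutex>

// Hypothetical stand-ins for the heap's collection counter and lock.
static unsigned g_total_collections = 0;
static std::mutex g_heap_lock;

unsigned read_collection_count() {
  std::lock_guard<std::mutex> guard(g_heap_lock);
  return g_total_collections;
}

// Remember the count before requesting a GC; skip the collection
// if another thread already caused one in the meantime.
void collect_if_count_unchanged(unsigned gc_count_before) {
  std::lock_guard<std::mutex> guard(g_heap_lock);
  if (gc_count_before == g_total_collections) {
    ++g_total_collections; // stands in for do_full_collection()
    std::printf("collected; count is now %u\n", g_total_collections);
  } else {
    std::printf("skipped; someone else collected first\n");
  }
}

int main() {
  unsigned before = read_collection_count();
  collect_if_count_unchanged(before); // performs the "collection"
  collect_if_count_unchanged(before); // skipped: the count has moved on
  return 0;
}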
Example 2: evaluate_at_safepoint
bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false; // no need for foreground young gc
    }
  }
  return true; // may still need foreground young gc
}
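Note the protocol here: when the VM thread evaluates this operation and finds the collection count already advanced, returning false signals that the operation no longer needs to run at a safepoint; doit() still executes, fails the same count check, and merely nudges the CMS thread. This reading follows from the two examples above; the exact scheduling contract of evaluate_at_safepoint() is HotSpot-version dependent.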
Example 3: wait_on_cms_lock_for_scavenge
// Wait until the next synchronous GC, a concurrent full gc request,
// or a timeout, whichever is earlier.
void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
  // Wait time in millis or 0 value representing infinite wait for a scavenge
  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  double start_time_secs = os::elapsedTime();
  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
  // Total collections count before waiting loop
  unsigned int before_count;
  {
    MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
    before_count = gch->total_collections();
  }
  unsigned int loop_count = 0;
  while (!_should_terminate) {
    double now_time = os::elapsedTime();
    long wait_time_millis;
    if (t_millis != 0) {
      // New wait limit
      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
      if (wait_time_millis <= 0) {
        // Wait time is over
        break;
      }
    } else {
      // No wait limit, wait if necessary forever
      wait_time_millis = 0;
    }
    // Wait until the next event or the remaining timeout
    {
      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
      if (_should_terminate || _collector->_full_gc_requested) {
        return;
      }
      set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
      CGC_lock->wait(Mutex::_no_safepoint_check_flag, wait_time_millis);
      clear_CMS_flag(CMS_cms_wants_token);
      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
             "Should not be set");
    }
    // Extra wait time check before entering the heap lock to get the collection count
    if (t_millis != 0 && os::elapsedTime() >= end_time_secs) {
      // Wait time is over
      break;
    }
    // Total collections count after the event
    unsigned int after_count;
    {
      MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
      after_count = gch->total_collections();
    }
    if (before_count != after_count) {
      // There was a collection - success
      break;
    }
    // Too many loops warning: ++loop_count only becomes 0 when the
    // counter wraps around, i.e. after UINT_MAX iterations
    if (++loop_count == 0) {
      warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
    }
  }
}
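The loop above combines three exit conditions: termination, a deadline, and an observed change in the collection count. A compact standalone sketch of the same wait-for-counter-change-with-deadline pattern using std::condition_variable follows; all names here are invented for illustration, and HotSpot's CGC_lock handshake is only loosely mirrored.

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex m;
static std::condition_variable cv;
static unsigned collection_count = 0;

// Wait until the counter advances past `before`, or until `t_millis`
// elapses; t_millis == 0 means wait without a deadline.
bool wait_for_collection(unsigned before, long t_millis) {
  std::unique_lock<std::mutex> lk(m);
  auto changed = [&] { return collection_count != before; };
  if (t_millis == 0) {
    cv.wait(lk, changed);
    return true;
  }
  return cv.wait_for(lk, std::chrono::milliseconds(t_millis), changed);
}

int main() {
  unsigned before;
  {
    std::lock_guard<std::mutex> lk(m);
    before = collection_count; // sample the count under the lock
  }
  std::thread collector([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    { std::lock_guard<std::mutex> lk(m); ++collection_count; }
    cv.notify_all(); // announce the "collection"
  });
  bool saw_gc = wait_for_collection(before, 1000);
  std::printf("collection observed: %s\n", saw_gc ? "yes" : "no");
  collector.join();
  return 0;
}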
Example 4: resize_spaces
//......... part of the code omitted here .........
    }
    // eden_end may have moved so again make sure
    // the to-space and eden don't overlap.
    to_start = MAX2(eden_end, to_start);

    // from-space
    size_t from_used = from()->used();
    if (requested_survivor_size > from_used) {
      if (from_start + requested_survivor_size >= from_start) {
        from_end = from_start + requested_survivor_size;
      }
      if (from_end > virtual_space()->high()) {
        from_end = virtual_space()->high();
      }
    }

    assert(to_start >= eden_end, "to-space should be above eden");
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  [eden_start .. eden_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             eden_start,
                             eden_end,
                             pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("  [  to_start ..   to_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             to_start,
                             to_end,
                             pointer_delta(to_end, to_start, sizeof(char)));
      gclog_or_tty->print_cr("  [from_start .. from_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             from_start,
                             from_end,
                             pointer_delta(from_end, from_start, sizeof(char)));
    }
  }

  guarantee((HeapWord*)from_start <= from()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from()->top();

  // For PrintAdaptiveSizePolicy block below
  size_t old_from = from()->capacity();
  size_t old_to   = to()->capacity();

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  // Must check mangling before the spaces are reshaped. Otherwise,
  // the bottom or end of one space may have moved into another,
  // and a failure of the check may not correctly indicate which
  // space is not properly mangled.
  if (ZapUnusedHeapArea) {
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden()->check_mangled_unused_area(limit);
    from()->check_mangled_unused_area(limit);
    to()->check_mangled_unused_area(limit);
  }

  // The call to initialize NULL's the next compaction space
  eden()->initialize(edenMR,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  eden()->set_next_compaction_space(from());
  to()->initialize(toMR,
                   SpaceDecorator::Clear,
                   SpaceDecorator::DontMangle);
  from()->initialize(fromMR,
                     SpaceDecorator::DontClear,
                     SpaceDecorator::DontMangle);
  assert(from()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                        "collection: %d "
                        "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                        "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                        gch->total_collections(),
                        old_from, old_to,
                        from()->capacity(),
                        to()->capacity());
    gclog_or_tty->cr();
  }
}
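The guarantees and assertions above hinge on eden, to, and from being carved out as non-overlapping half-open [start, end) regions. Here is a minimal sketch of that overlap check on plain pointers; the Region type is hypothetical, standing in for MemRegion.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical half-open region, mirroring MemRegion's [start, end) layout.
struct Region {
  const char* start;
  const char* end;
  size_t byte_size() const { return static_cast<size_t>(end - start); }
};

// Two half-open regions overlap unless one ends before the other begins.
bool overlaps(const Region& a, const Region& b) {
  return a.start < b.end && b.start < a.end;
}

int main() {
  char heap[96];
  Region eden{heap +  0, heap + 48};
  Region to  {heap + 48, heap + 72}; // like to_start = MAX2(eden_end, to_start)
  Region from{heap + 72, heap + 96};
  assert(!overlaps(eden, to) && !overlaps(to, from));
  std::printf("eden=%zu to=%zu from=%zu bytes, no overlap\n",
              eden.byte_size(), to.byte_size(), from.byte_size());
  return 0;
}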
Example 5: mem_allocate_work
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // Discard any handles allocated in each iteration.

    // First allocation attempt is lock-free.
    Generation* young = gch->young_gen();
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before; // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL; // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = gch->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue; // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown. Clear gc_overhead_limit_exceeded
//......... part of the code omitted here .........
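The overall shape of mem_allocate_work is: a lock-free fast-path allocation, then a locked slow path, then a VM operation for GC. Below is a toy sketch of just the first two stages with a CAS bump pointer; the Arena class is invented for illustration and only loosely mirrors par_allocate and attempt_allocation.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <mutex>

// Hypothetical bump-pointer arena modeling the lock-free fast path
// followed by a locked slow path.
class Arena {
  alignas(16) char buf_[1024];
  std::atomic<size_t> top_{0};
  std::mutex heap_lock_;
public:
  // Fast path: lock-free CAS bump, in the spirit of Generation::par_allocate.
  void* par_allocate(size_t size) {
    size_t old_top = top_.load(std::memory_order_relaxed);
    do {
      if (old_top + size > sizeof(buf_)) return nullptr; // out of space
    } while (!top_.compare_exchange_weak(old_top, old_top + size,
                                         std::memory_order_relaxed));
    return buf_ + old_top;
  }
  // Slow path: taken under the lock when the fast path fails;
  // a real heap would try other generations or trigger a GC here.
  void* attempt_allocation(size_t size) {
    std::lock_guard<std::mutex> guard(heap_lock_);
    return par_allocate(size);
  }
};

void* mem_allocate_work_sketch(Arena& a, size_t size) {
  if (void* p = a.par_allocate(size)) return p; // lock-free first attempt
  return a.attempt_allocation(size);            // then the locked path
}

int main() {
  Arena arena;
  void* p = mem_allocate_work_sketch(arena, 64);
  std::printf("allocated %p\n", p);
  return 0;
}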