This article collects typical usage examples of the C++ method GenCollectedHeap::young_gen. If you are looking for concrete examples of how GenCollectedHeap::young_gen is used in practice, the curated code samples below may help. You can also explore further usage examples of the enclosing class, GenCollectedHeap.
Four code examples of the GenCollectedHeap::young_gen method are shown below, sorted by popularity by default.
Example 1: should_try_older_generation_allocation
// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
// was a full collection because a partial collection (would
// have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t young_capacity = gch->young_gen()->capacity_before_gc();
  return (word_size > heap_word_size(young_capacity))
      || GCLocker::is_active_and_needs_gc()
      || gch->incremental_collection_failed();
}
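
For readers who want to experiment with this decision outside a HotSpot build, here is a minimal standalone C++ sketch (not HotSpot code): HeapState and its fields are hypothetical stand-ins for the values the real policy reads from GenCollectedHeap and GCLocker, and the numbers are made up for the demo.

#include <cstddef>
#include <iostream>

// Hypothetical snapshot of the heap state the policy consults.
struct HeapState {
  size_t young_capacity_words;          // young gen capacity before GC, in words
  bool   gc_locker_active_needs_gc;     // a JNI critical section is blocking GC
  bool   incremental_collection_failed; // the last partial collection failed
};

// Same three-way test as the example above: any one condition is enough
// to send the allocation straight to the old generation.
bool should_try_older_generation_allocation(const HeapState& h, size_t word_size) {
  return word_size > h.young_capacity_words
      || h.gc_locker_active_needs_gc
      || h.incremental_collection_failed;
}

int main() {
  HeapState h{1024 * 1024, false, false};  // 1M-word young gen, GC unhindered
  std::cout << std::boolalpha
            << should_try_older_generation_allocation(h, 16) << '\n'               // false: fits in young gen
            << should_try_older_generation_allocation(h, 8 * 1024 * 1024) << '\n'; // true: too large for young gen
}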
Example 2: expand_heap_and_allocate
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  Generation *old = gch->old_gen();
  if (old->should_allocate(size, is_tlab)) {
    result = old->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    Generation *young = gch->young_gen();
    if (young->should_allocate(size, is_tlab)) {
      result = young->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}
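
This expansion path is only reached from the slow path shown in Example 4, after ordinary allocation has already failed. As a rough standalone illustration of the fall-back order (old generation first, young generation second), here is a small sketch; the Gen type and its fields are hypothetical and merely stand in for what Generation::should_allocate and expand_and_allocate check in HotSpot.

#include <cstddef>

// Standalone sketch (not HotSpot code) of a generation that can grow on demand.
struct Gen {
  size_t used;      // words currently allocated
  size_t reserved;  // hard upper bound the generation may grow to

  // Pretend expansion always succeeds up to 'reserved'; return a fake address.
  void* expand_and_allocate(size_t size) {
    if (used + size > reserved) {
      return nullptr;            // cannot grow far enough
    }
    used += size;
    return &used;                // placeholder for the real allocation address
  }
};

// Mirrors the fall-back order above: old generation first, young as last resort.
void* expand_heap_and_allocate(Gen& old_gen, Gen& young_gen, size_t size) {
  if (void* p = old_gen.expand_and_allocate(size)) {
    return p;                    // old generation could be expanded
  }
  return young_gen.expand_and_allocate(size);
}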
Example 3: invoke_at_safepoint
void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions();

  allocate_stacks();

  mark_sweep_phase1(clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated the young generation then we
  // can clear the card table. Otherwise, we must invalidate
  // it (consider all cards dirty). In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  CardTableRS* rs = gch->rem_set();
  Generation* old_gen = gch->old_gen();

  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (gch->young_gen()->used() == 0) {
    // We've evacuated the young generation.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  set_ref_processor(NULL);

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
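
The most interesting use of young_gen() here is the final card-table decision: if the full collection left the young generation empty, no old-to-young pointers can remain and the cards can simply be cleared; otherwise they must conservatively be treated as dirty. Below is a reduced standalone sketch of that decision (not HotSpot code; the real invalidate_or_clear is finer-grained, clearing evacuated regions and dirtying only the still-used ones).

#include <cstddef>
#include <cstdint>
#include <vector>

enum Card : uint8_t { clean = 0, dirty = 1 };

struct CardTable {
  std::vector<Card> cards;                         // one card per chunk of the old gen
  void clear_all()      { for (Card& c : cards) c = clean; }
  void invalidate_all() { for (Card& c : cards) c = dirty; }
};

// Mirrors the tail of invoke_at_safepoint(), collapsed to a binary choice.
void update_card_table_after_full_gc(CardTable& ct, size_t young_gen_used_words) {
  if (young_gen_used_words == 0) {
    ct.clear_all();        // young gen fully evacuated: no old->young pointers left
  } else {
    ct.invalidate_all();   // conservatively treat every old-gen card as dirty
  }
}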
Example 4: mem_allocate_work
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // Discard any handles allocated in each iteration.

    // First allocation attempt is lock-free.
    Generation *young = gch->young_gen();
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }

    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = gch->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown. Clear gc_overhead_limit_exceeded
      //......... (the rest of this code is omitted here) .........
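
Although the tail of this example is cut off, the visible part already shows the overall shape of the slow path: a lock-free attempt in the young generation, a locked attempt across the generations, and finally a stop-the-world collection before retrying. Here is a heavily simplified standalone sketch of that control flow (not HotSpot code; ToyHeap, its member functions, and std::mutex are placeholders for GenCollectedHeap, its allocation entry points, and Heap_lock).

#include <cstddef>
#include <mutex>

// Placeholder interface; a real implementation would wrap an actual allocator.
struct ToyHeap {
  virtual void* par_allocate(size_t size) = 0;        // lock-free fast path
  virtual void* attempt_allocation(size_t size) = 0;  // slow path, lock held by caller
  virtual void  collect() = 0;                        // stop-the-world collection
  virtual ~ToyHeap() = default;
};

std::mutex heap_lock;  // stands in for HotSpot's Heap_lock

void* mem_allocate_work(ToyHeap& heap, size_t size, int max_gc_retries = 3) {
  for (int try_count = 0; try_count <= max_gc_retries; ++try_count) {
    // 1. Lock-free attempt (the young-generation fast path above).
    if (void* p = heap.par_allocate(size)) return p;
    {
      // 2. Locked slow path across the generations.
      std::lock_guard<std::mutex> guard(heap_lock);
      if (void* p = heap.attempt_allocation(size)) return p;
    }
    // 3. Collect (the VM_GenCollectForAllocation step) and loop to retry.
    heap.collect();
  }
  return nullptr;  // still no memory after repeated collections
}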