本文整理汇总了C++中CollectedHeap类的典型用法代码示例。如果您正苦于以下问题:C++ CollectedHeap类的具体用法?C++ CollectedHeap怎么用?C++ CollectedHeap使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了CollectedHeap类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: doit
void VM_GC_HeapInspection::doit() {
HandleMark hm;
CollectedHeap* ch = Universe::heap();
ch->ensure_parsability(false); // must happen, even if collection does
// not happen (e.g. due to GC_locker)
if (_full_gc) {
// The collection attempt below would be skipped anyway if
// the gc locker is held. The following dump may then be a tad
// misleading to someone expecting only live objects to show
// up in the dump (see CR 6944195). Just issue a suitable warning
// in that case and do not attempt to do a collection.
// The latter is a subtle point, because even a failed attempt
// to GC will, in fact, induce one in the future, which we
// probably want to avoid in this case because the GC that we may
// be about to attempt holds value for us only
// if it happens now and not if it happens in the eventual
// future.
if (GC_locker::is_active()) {
warning("GC locker is held; pre-dump GC was skipped");
} else {
ch->collect_as_vm_thread(GCCause::_heap_inspection);
}
}
HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
}
示例2: assert
// End of the pause: if verification was active before the pause, re-enable
// it by refreshing the recorded collection count on the wrapped verifier.
Pause_No_GC_Verifier::~Pause_No_GC_Verifier() {
  if (!_ngcv->_verifygc) {
    return;  // verification was off before the pause; nothing to restore
  }
  CollectedHeap* heap = Universe::heap();
  assert(!heap->is_gc_active(), "GC active during No_GC_Verifier");
  _ngcv->_old_invocations = heap->total_collections();
}
示例3: doit
// VM operation: collect object address information into the given logs.
// The heap is first made parsable; this must happen even if no collection
// actually occurs (e.g. due to GC_locker).
void VM_GC_ObjectAddressInfoCollection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false);
  ObjectAddressInfoCollection::collect_object_address_info(_addrinfo_log,
                                                           _krinfo_log,
                                                           _reason);
}
示例4: AdaptiveSizePolicyOutput
// Constructor: capture the heap's size policy and decide whether output
// should be printed for this invocation. The special value of a zero count
// can be used to ignore the count test.
AdaptiveSizePolicyOutput(uint count) {
  const bool enabled =
      UseAdaptiveSizePolicy && (AdaptiveSizePolicyOutputInterval > 0);
  if (!enabled) {
    _size_policy = NULL;
    _do_print = false;
    return;
  }
  _size_policy = Universe::heap()->size_policy();
  _do_print = print_test(count);
}
示例5: blk
void CardTableRS::verify() {
// At present, we only know how to verify the card table RS for
// generational heaps.
VerifyCTGenClosure blk(this);
CollectedHeap* ch = Universe::heap();
if (ch->kind() == CollectedHeap::GenCollectedHeap) {
GenCollectedHeap::heap()->generation_iterate(&blk, false);
_ct_bs->verify();
}
}
示例6: update_barrier_set
// Record a reference-field store in the barrier set; under train GC, the
// generational heap additionally gets to examine the updated slot.
inline void update_barrier_set(oop *p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
  if (!UseTrainGC) {
    return;
  }
  // Each generation has a chance to examine the oop.
  CollectedHeap* heap = Universe::heap();
  // This is even more bogus.
  if (heap->kind() == CollectedHeap::GenCollectedHeap) {
    ((GenCollectedHeap*)heap)->examine_modified_oop(p);
  }
}
示例7: adr_receiver
// Iterate, restricted to MemRegion mr, over the oops embedded in this
// ReceiverTypeData. With the current collectors this is deliberately a
// no-op (see the long comment below); the '#if 0' section preserves the
// general implementation for when those assumptions no longer hold.
void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
// Currently, this interface is called only during card-scanning for
// a young gen gc, in which case this object cannot contribute anything,
// since it does not contain any references that cross out of
// the perm gen. However, for future more general use we allow
// the possibility of calling for instance from more general
// iterators (for example, a future regionalized perm gen for G1,
// or the possibility of moving some references out of perm in
// the case of other collectors). In that case, you will need
// to relax or remove some of the assertions below.
#ifdef ASSERT
// Verify that none of the embedded oop references cross out of
// this generation.
for (uint row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) {
oop* adr = adr_receiver(row);
CollectedHeap* h = Universe::heap();
assert(h->is_permanent(adr) && h->is_permanent_or_null(*adr), "Not intra-perm");
}
}
#endif // ASSERT
// No caller is expected to request MDO memoization in the current setup.
assert(!blk->should_remember_mdo(), "Not expected to remember MDO");
return; // Nothing to do, see comment above
// Dead code below is the intended general implementation, kept for
// reference; re-enable (and revisit the assertions above) if the
// perm-gen assumptions change.
#if 0
if (blk->should_remember_mdo()) {
// This is a set of weak references that need
// to be followed at the end of the strong marking
// phase. Memoize this object so it can be visited
// in the weak roots processing phase.
blk->remember_mdo(data());
} else { // normal scan
for (uint row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) {
oop* adr = adr_receiver(row);
if (mr.contains(adr)) {
blk->do_oop(adr);
} else if ((HeapWord*)adr >= mr.end()) {
// Test that the current cursor and the two ends of the range
// that we may have skipped iterating over are monotonically ordered;
// this is just a paranoid assertion, just in case represetations
// should change in the future rendering the short-circuit return
// here invalid.
assert((row+1 >= row_limit() || adr_receiver(row+1) > adr) &&
(row+2 >= row_limit() || adr_receiver(row_limit()-1) > adr_receiver(row+1)), "Reducing?");
break; // remaining should be outside this mr too
}
}
}
}
#endif
}
示例8: compute_survivor_size
// Recompute the eden/from/to space boundaries after the committed size of
// the generation's virtual space has changed. Spaces are laid out
// contiguously as eden, from, to, all sized in multiples of the car size.
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
// All space sizes must be multiples of car size in order for the CarTable to work.
// Note that the CarTable is used with and without train gc (for fast lookup).
uintx alignment = CarSpace::car_size();
// Compute sizes
uintx size = _virtual_space.committed_size();
uintx survivor_size = compute_survivor_size(size, alignment);
uintx eden_size = size - (2*survivor_size);
assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
if (eden_size < minimum_eden_size) {
// May happen due to 64Kb rounding, if so adjust eden size back up
minimum_eden_size = align_size_up(minimum_eden_size, alignment);
uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
uintx unaligned_survivor_size =
align_size_down(maximum_survivor_size, alignment);
// Never let a survivor space shrink below one car-aligned unit.
survivor_size = MAX2(unaligned_survivor_size, alignment);
eden_size = size - (2*survivor_size);
assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
assert(eden_size >= minimum_eden_size, "just checking");
}
// Lay the three spaces out contiguously: eden, then from, then to.
char *eden_start = _virtual_space.low();
char *from_start = eden_start + eden_size;
char *to_start = from_start + survivor_size;
char *to_end = to_start + survivor_size;
assert(to_end == _virtual_space.high(), "just checking");
assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
// NOTE(review): the second initialize() argument presumably controls
// clearing — eden only when minimum_eden_size == 0, survivors always;
// confirm against ContiguousSpace::initialize.
eden()->initialize(edenMR, (minimum_eden_size == 0));
from()->initialize(fromMR, true);
to()->initialize(toMR , true);
// Notify JVMPI listeners of the (re)created arenas.
if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
CollectedHeap* ch = Universe::heap();
jvmpi::post_arena_new_event(ch->addr_to_arena_id(eden_start), "Eden");
jvmpi::post_arena_new_event(ch->addr_to_arena_id(from_start), "Semi");
jvmpi::post_arena_new_event(ch->addr_to_arena_id(to_start), "Semi");
}
}
示例9: swap_spaces
void DefNewGeneration::swap_spaces() {
CollectedHeap* ch = Universe::heap();
if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_DELETE)) {
jvmpi::post_arena_delete_event(ch->addr_to_arena_id(from()->bottom()));
}
if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
jvmpi::post_arena_new_event(ch->addr_to_arena_id(from()->bottom()), "Semi");
}
ContiguousSpace* s = from();
_from_space = to();
_to_space = s;
if (UsePerfData) {
CSpaceCounters* c = _from_counters;
_from_counters = _to_counters;
_to_counters = c;
}
}
示例10: TEST_VM
// Sanity-check CollectedHeap::is_in(): NULL and pointers just outside both
// ends of the reserved region must be reported as not in the heap.
TEST_VM(CollectedHeap, is_in) {
  CollectedHeap* const heap = Universe::heap();
  const uintptr_t delta = (uintptr_t) MinObjAlignment;
  const uintptr_t lo = (uintptr_t) heap->reserved_region().start();
  const uintptr_t hi = (uintptr_t) heap->reserved_region().end();

  // NULL must never be inside the heap.
  ASSERT_FALSE(heap->is_in(NULL)) << "NULL is unexpectedly in the heap";

  // A pointer just below the heap start must be outside the heap.
  ASSERT_GE(lo, ((uintptr_t) NULL + delta))
      << "Sanity check - heap should not start at 0";
  void* const below = (void*) (lo - delta);
  ASSERT_FALSE(heap->is_in(below)) << "before_heap: " << p2i(below)
                                   << " is unexpectedly in the heap";

  // A pointer just above the heap end must be outside the heap.
  ASSERT_LE(hi, ((uintptr_t)-1 - delta))
      << "Sanity check - heap should not end at the end of address space";
  void* const above = (void*) (hi + delta);
  ASSERT_FALSE(heap->is_in(above)) << "after_heap: " << p2i(above)
                                   << " is unexpectedly in the heap";
}
示例11: findref
// Debug helper: search the heap, the strong roots, and the code cache for
// any reference to the given address, printing progress to tty.
static void findref(intptr_t x) {
  const oop target = (oop) x;
  LookForRefInGenClosure ref_closure;
  ref_closure.target = target;
  LookForRefInObjectClosure obj_closure(target);

  tty->print_cr("Searching heap:");
  Universe::heap()->object_iterate(&obj_closure);

  tty->print_cr("Searching strong roots:");
  Universe::oops_do(&ref_closure, false);
  JNIHandles::oops_do(&ref_closure); // Global (strong) JNI handles
  Threads::oops_do(&ref_closure, NULL);
  ObjectSynchronizer::oops_do(&ref_closure);
  //FlatProfiler::oops_do(&ref_closure);
  SystemDictionary::oops_do(&ref_closure);

  tty->print_cr("Searching code cache:");
  CodeCache::oops_do(&ref_closure);
  tty->print_cr("Done.");
}
示例12: assert
// Allocate a typeArrayOop of the given length. Throws
// NegativeArraySizeException for a negative length and OutOfMemoryError
// when the length exceeds the maximum for this element type.
typeArrayOop typeArrayKlass::allocate(int length, TRAPS) {
  assert(log2_element_size() >= 0, "bad scale");
  // Reject invalid lengths up front.
  if (length < 0) {
    THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  }
  if (length > max_length()) {
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  size_t size = typeArrayOopDesc::object_size(layout_helper(), length);
  KlassHandle h_k(THREAD, as_klassOop());
  // Large type arrays take a dedicated allocation path.
  typeArrayOop t;
  if (size < Universe::heap()->large_typearray_limit()) {
    t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
  } else {
    t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
  }
  assert(t->is_parsable(), "Don't publish unless parsable");
  return t;
}
示例13: out
// Log the end of a timed GC phase: title, cause (if any), heap usage
// before/after (if recorded), and start/stop/duration timing.
inline void GCTraceTimeImpl::log_stop(jlong start_counter, jlong stop_counter) {
  const double elapsed_ms = TimeHelper::counter_to_millis(stop_counter - start_counter);
  const double start_secs = TimeHelper::counter_to_seconds(start_counter);
  const double stop_secs  = TimeHelper::counter_to_seconds(stop_counter);
  LogStream out(_out_stop);
  out.print("%s", _title);
  if (_gc_cause != GCCause::_no_gc) {
    out.print(" (%s)", GCCause::to_string(_gc_cause));
  }
  // SIZE_MAX marks "usage-before not recorded"; otherwise report
  // before -> after (capacity), all in megabytes.
  if (_heap_usage_before != SIZE_MAX) {
    CollectedHeap* heap = Universe::heap();
    out.print(" " LOG_STOP_HEAP_FORMAT,
              _heap_usage_before / M, heap->used() / M, heap->capacity() / M);
  }
  out.print_cr(" " LOG_STOP_TIME_FORMAT, start_secs, stop_secs, elapsed_ms);
}
示例14: blk
// Verify the card table remembered set for a generational heap, including
// its perm-gen portion. The perm-gen boundary used for the perm-space
// verification depends on whether old-gen collections also collect perm.
void CardTableRS::verify() {
// At present, we only know how to verify the card table RS for
// generational heaps.
VerifyCTGenClosure blk(this);
CollectedHeap* ch = Universe::heap();
// We will do the perm-gen portion of the card table, too.
Generation* pg = SharedHeap::heap()->perm_gen();
HeapWord* pg_boundary = pg->reserved().start();
if (ch->kind() == CollectedHeap::GenCollectedHeap) {
GenCollectedHeap::heap()->generation_iterate(&blk, false);
_ct_bs->verify();
// If the old gen collections also collect perm, then we are only
// interested in perm-to-young pointers, not perm-to-old pointers.
GenCollectedHeap* gch = GenCollectedHeap::heap();
CollectorPolicy* cp = gch->collector_policy();
if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
// Use the start of the old gen (gen 1) as the boundary instead.
pg_boundary = gch->get_gen(1)->reserved().start();
}
}
// Verify the perm-gen spaces against the chosen boundary regardless of
// heap kind.
VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}
示例15: assert
// Typed accessor for the singleton heap; asserts that the heap has been
// initialized and is of the parallel-scavenge kind before downcasting.
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* h = Universe::heap();
  assert(h != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(h->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)h;
}