This article collects typical usage examples of the C++ oop::size method. If you have been wondering what exactly oop::size does and how to use it, the curated examples below may help. You can also explore further usage examples of oop, the class this method belongs to.
15 code examples of the oop::size method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: record_instance
// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::record_instance(const oop obj) {
  Klass* k = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  // elt may be NULL if it's a new klass for which we
  // could not allocate space for a new entry in the hashtable.
  if (elt != NULL) {
    elt->set_count(elt->count() + 1);
    elt->set_words(elt->words() + obj->size());
    _size_of_instances_in_words += obj->size();
    return true;
  } else {
    return false;
  }
}
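The shape of record_instance (look up a per-class entry, bump its count, accumulate the object's words) is easy to reproduce outside HotSpot. Below is a minimal self-contained sketch of the same bookkeeping using the C++ standard library; InstanceTable, TypeStats and record are hypothetical names, not HotSpot API, and the could-not-allocate case is modeled with std::bad_alloc instead of a fixed-size hashtable.

#include <cstddef>
#include <new>
#include <string>
#include <unordered_map>

struct TypeStats {
  std::size_t count = 0;   // number of instances seen
  std::size_t words = 0;   // accumulated size in words
};

class InstanceTable {
public:
  // Returns false if a new entry could not be created, mirroring
  // the out-of-space case in KlassInfoTable::record_instance.
  bool record(const std::string& type_name, std::size_t size_in_words) {
    try {
      TypeStats& elt = _table[type_name];   // lookup-or-insert
      elt.count += 1;
      elt.words += size_in_words;
      _total_words += size_in_words;
      return true;
    } catch (const std::bad_alloc&) {
      return false;
    }
  }
private:
  std::unordered_map<std::string, TypeStats> _table;
  std::size_t _total_words = 0;
};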
Example 2: do_object
void do_object(oop obj) {
  if (obj->is_shared()) {
    return;
  }
  if (obj->is_gc_marked() && obj->forwardee() == NULL) {
    int s = obj->size();
    oop sh_obj = (oop)_space->allocate(s);
    if (sh_obj == NULL) {
      if (_read_only) {
        warning("\nThe permanent generation read only space is not large "
                "enough to \npreload requested classes. Use "
                "-XX:SharedReadOnlySize= to increase \nthe initial "
                "size of the read only space.\n");
      } else {
        warning("\nThe permanent generation read write space is not large "
                "enough to \npreload requested classes. Use "
                "-XX:SharedReadWriteSize= to increase \nthe initial "
                "size of the read write space.\n");
      }
      exit(2);
    }
    if (PrintSharedSpaces && Verbose && WizardMode) {
      tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s", obj, sh_obj,
                    (_read_only ? "ro" : "rw"));
    }
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s);
    obj->forward_to(sh_obj);
    if (_read_only) {
      // Readonly objects: set hash value to self pointer and make gc_marked.
      sh_obj->forward_to(sh_obj);
    } else {
      sh_obj->init_mark();
    }
  }
}
Example 3: do_object
void ClassifyObjectClosure::do_object(oop obj) {
  int i = classify_object(obj, true);
  ++object_count[i];
  ++total_object_count;
  size_t size = obj->size() * HeapWordSize;
  object_size[i] += size;
  total_object_size += size;
}
Example 4: handle_promotion_failure
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);
  _promo_failure_scan_stack.push(old);
  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
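The _promo_failure_drain_in_progress flag is the interesting part here: draining the scan stack can push more objects onto the same stack, so the boolean guard turns would-be recursion into iteration. A minimal sketch of the idiom, with hypothetical names and int standing in for oop:

#include <stack>

class PromoFailureScanner {
public:
  void handle_failure(int obj) {
    _scan_stack.push(obj);
    if (!_drain_in_progress) {
      _drain_in_progress = true;   // prevent recursion
      drain();
      _drain_in_progress = false;
    }
  }
private:
  void drain() {
    while (!_scan_stack.empty()) {
      int obj = _scan_stack.top();
      _scan_stack.pop();
      scan(obj);                   // may call handle_failure() again
    }
  }
  void scan(int /*obj*/) { /* follow the object's references here */ }

  std::stack<int> _scan_stack;
  bool _drain_in_progress = false;
};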
Example 5: mark_obj
inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}
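mark_obj() only does the summary accounting when the bitmap reports that this thread actually set the mark, which is what keeps parallel markers from double-counting an object. A minimal sketch of such a bitmap, assuming one mark bit per object and C++11 atomics (the real ParMarkBitMap also encodes object sizes, which this sketch omits):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class MarkBitmap {
public:
  explicit MarkBitmap(std::size_t num_bits) : _words((num_bits + 63) / 64) {}

  // Returns true iff this call flipped the bit from 0 to 1, i.e.
  // iff the calling thread is the one that marked the object.
  bool mark(std::size_t bit) {
    std::atomic<std::uint64_t>& word = _words[bit / 64];
    const std::uint64_t mask = std::uint64_t(1) << (bit % 64);
    const std::uint64_t old = word.fetch_or(mask, std::memory_order_relaxed);
    return (old & mask) == 0;
  }
private:
  std::vector<std::atomic<std::uint64_t>> _words;
};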
Example 6: handle_promotion_failure
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);
  _promo_failure_scan_stack.push(old);
  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
Example 7: unallocate_object
bool PSPromotionLAB::unallocate_object(oop obj) {
  assert(Universe::heap()->is_in(obj), "Object outside heap");
  if (contains(obj)) {
    HeapWord* object_end = (HeapWord*)obj + obj->size();
    assert(object_end <= top(), "Object crosses promotion LAB boundary");
    if (object_end == top()) {
      set_top((HeapWord*)obj);
      return true;
    }
  }
  return false;
}
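unallocate_object() works because a promotion LAB is a bump-pointer allocator: an allocation can be undone only while it is still the most recent one, i.e. while the object's end coincides with the current top. A minimal byte-based sketch of the idea, with hypothetical names:

#include <cstddef>

class BumpLab {
public:
  BumpLab(char* bottom, char* end) : _bottom(bottom), _top(bottom), _end(end) {}

  void* allocate(std::size_t bytes) {
    if (std::size_t(_end - _top) < bytes) return nullptr;
    char* obj = _top;
    _top += bytes;
    return obj;
  }

  // Mirrors PSPromotionLAB::unallocate_object: undo succeeds only
  // for the last object handed out.
  bool unallocate(void* obj, std::size_t bytes) {
    char* p = static_cast<char*>(obj);
    if (p >= _bottom && p < _end && p + bytes == _top) {
      _top = p;   // roll the bump pointer back
      return true;
    }
    return false;
  }
private:
  char* _bottom;
  char* _top;
  char* _end;
};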
Example 8: copy_to_survivor_space
oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;
  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
  // enabled)
  if (old->age() < tenuring_threshold() &&
      !Universe::jvmpi_slow_allocation()) {
    obj = (oop) to()->allocate(s);
  }
  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
      // is incorrectly set. In any case, it's seriously wrong to be here!
      vm_exit_out_of_memory(s*wordSize, "promotion");
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    atomic::prefetch_write(obj, interval);
    // Copy obj
    Memory::copy_words_aligned((HeapWord*)old, (HeapWord*)obj, s);
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }
  if (Universe::jvmpi_move_event_enabled()) {
    Universe::jvmpi_object_move(old, obj);
  }
  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
Example 9: copy_to_survivor_space
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;
  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }
  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);
    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }
  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
Example 10: copy_to_survivor_space
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;
  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }
  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);
    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }
  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
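Examples 8-10 are variations on one scavenge skeleton: try the to-space, fall back to promotion, copy the payload, then record the new location by forwarding the old header. The single-threaded sketch below strips that skeleton down to hypothetical types; promote() is assumed to copy the payload itself, as Generation::promote does, and all names here are stand-ins rather than HotSpot API.

#include <cstddef>
#include <cstring>

struct Obj {
  Obj*        forwardee = nullptr;  // set once the object has moved
  unsigned    age       = 0;
  std::size_t size      = 0;        // object size in bytes (>= sizeof(Obj))
};

Obj* copy_to_survivor_space(Obj* old_obj,
                            Obj* (*to_space_alloc)(std::size_t),
                            Obj* (*promote)(std::size_t),  // assumed to copy the payload
                            unsigned tenuring_threshold) {
  Obj* obj = nullptr;
  // Try allocating in to-space (unless too old)
  if (old_obj->age < tenuring_threshold) {
    obj = to_space_alloc(old_obj->size);
  }
  // Otherwise try allocating tenured
  if (obj == nullptr) {
    obj = promote(old_obj->size);
    if (obj == nullptr) {
      old_obj->forwardee = old_obj;   // promotion failed: forward to self
      return old_obj;
    }
  } else {
    std::memcpy(obj, old_obj, old_obj->size);  // copy the payload
    obj->age = old_obj->age + 1;               // survived another scavenge
  }
  old_obj->forwardee = obj;                    // insert the forward pointer
  return obj;
}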
Example 11: do_object
// <original comment>
// The original idea here was to coalesce evacuated and dead objects.
// However that caused complications with the block offset table (BOT).
// In particular if there were two TLABs, one of them partially refined.
// |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
// The BOT entries of the unrefined part of TLAB_2 point to the start
// of TLAB_2. If the last object of the TLAB_1 and the first object
// of TLAB_2 are coalesced, then the cards of the unrefined part
// would point into middle of the filler object.
// The current approach is to not coalesce and leave the BOT contents intact.
// </original comment>
//
// We now reset the BOT when we start the object iteration over the
// region and refine its entries for every object we come across. So
// the above comment is not really relevant and we should be able
// to coalesce dead objects if we want to.
void do_object(oop obj) {
  HeapWord* obj_addr = (HeapWord*) obj;
  assert(_hr->is_in(obj_addr), "sanity");
  size_t obj_size = obj->size();
  HeapWord* obj_end = obj_addr + obj_size;

  if (_end_of_last_gap != obj_addr) {
    // there was a gap before obj_addr
    _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
  }

  if (obj->is_forwarded() && obj->forwardee() == obj) {
    // The object failed to move.

    // We consider all objects that we find self-forwarded to be
    // live. What we'll do is that we'll update the prev marking
    // info so that they are all under PTAMS and explicitly marked.
    if (!_cm->isPrevMarked(obj)) {
      _cm->markPrev(obj);
    }
    if (_during_initial_mark) {
      // For the next marking info we'll only mark the
      // self-forwarded objects explicitly if we are during
      // initial-mark (since, normally, we only mark objects pointed
      // to by roots if we succeed in copying them). By marking all
      // self-forwarded objects we ensure that we mark any that are
      // still pointed to by roots. During concurrent marking, and
      // after initial-mark, we don't need to mark any objects
      // explicitly and all objects in the CSet are considered
      // (implicitly) live. So, we won't mark them explicitly and
      // we'll leave them over NTAMS.
      _cm->grayRoot(obj, obj_size, _worker_id, _hr);
    }
    _marked_bytes += (obj_size * HeapWordSize);
    obj->set_mark(markOopDesc::prototype());

    // While we were processing RSet buffers during the collection,
    // we actually didn't scan any cards on the collection set,
    // since we didn't want to update remembered sets with entries
    // that point into the collection set, given that live objects
    // from the collection set are about to move and such entries
    // will be stale very soon.
    // This change also dealt with a reliability issue which
    // involved scanning a card in the collection set and coming
    // across an array that was being chunked and looking malformed.
    // The problem is that, if evacuation fails, we might have
    // remembered set entries missing given that we skipped cards on
    // the collection set. So, we'll recreate such entries now.
    obj->oop_iterate(_update_rset_cl);
  } else {
    // The object has been either evacuated or is dead. Fill it with a
    // dummy object.
    MemRegion mr(obj_addr, obj_size);
    CollectedHeap::fill_with_object(mr);

    // must nuke all dead objects which we skipped when iterating over the region
    _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
  }
  _end_of_last_gap = obj_end;
  _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
}
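Example 11 leans on the self-forwarding convention used in Examples 4 and 6: an object that failed to evacuate is forwarded to itself, so is_forwarded() && forwardee() == obj cleanly separates failed copies from objects that either moved or died. A tiny sketch of the predicate, with hypothetical types:

struct GObj {
  GObj* forwardee = nullptr;   // nullptr until the object is forwarded

  bool is_forwarded() const { return forwardee != nullptr; }
};

// True only for objects whose evacuation failed (forwarded to self).
inline bool is_self_forwarded(const GObj* obj) {
  return obj->is_forwarded() && obj->forwardee == obj;
}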
Example 12: record_instance
void KlassInfoTable::record_instance(const oop obj) {
  klassOop k = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  elt->set_count(elt->count() + 1);
  elt->set_words(elt->words() + obj->size());
}
Example 13: handle_evacuation_failure_par
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index > 0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied)
        obj->set_mark(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark(old_mark->set_age(age));
      }
      age_table()->add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    size_t* const surv_young_words = surviving_young_words();
    surv_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
//......... the remaining code has been omitted .........
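The pivotal step in Example 13 is forward_to_atomic(): several workers may race to copy the same object, and only the one whose CAS installs the forwarding pointer keeps its copy, while the losers undo their allocation. A minimal sketch of that convention (NULL on success, the winning copy on failure), assuming hypothetical types and C++11 atomics:

#include <atomic>

struct GCObj {
  std::atomic<GCObj*> forwardee{nullptr};
};

// Returns nullptr if we installed 'copy' ourselves, otherwise the copy
// some other worker installed first -- mirroring the NULL / forward_ptr
// convention in the example above.
GCObj* forward_to_atomic(GCObj* old_obj, GCObj* copy) {
  GCObj* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, copy)) {
    return nullptr;    // we won the race; our copy is the canonical one
  }
  return expected;     // lost the race; undo our allocation, use this copy
}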
Example 14: do_object
void do_object(oop obj) {
  Klass* k = obj->blueprint();
  k->set_alloc_count(k->alloc_count() + 1);
  k->set_alloc_size(k->alloc_size() + obj->size());
}
Example 15: copy_to_survivor_space
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the objects age, MT safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.
        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");
//......... the remaining code has been omitted .........
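Example 15's young and old paths share one sizing policy: an object larger than half a PLAB is allocated straight from the shared space with a single CAS, while a smaller one retires the current lab and carves out a fresh one. The self-contained sketch below models that policy in bytes rather than heap words; SharedSpace, Plab, and allocate_for_copy are hypothetical names, not the HotSpot classes.

#include <atomic>
#include <cstddef>

// Shared space: lock-free bump allocation with a CAS loop.
class SharedSpace {
public:
  SharedSpace(char* base, std::size_t bytes) : _cursor(base), _end(base + bytes) {}

  void* cas_allocate(std::size_t bytes) {
    char* old = _cursor.load(std::memory_order_relaxed);
    char* next;
    do {
      next = old + bytes;
      if (next > _end) return nullptr;   // space exhausted
    } while (!_cursor.compare_exchange_weak(old, next));
    return old;
  }
private:
  std::atomic<char*> _cursor;
  char* _end;
};

// Thread-local lab: plain bump allocation, no synchronization needed.
class Plab {
public:
  void initialize(void* base, std::size_t bytes) {
    _top = static_cast<char*>(base);
    _end = _top + bytes;
  }
  void* allocate(std::size_t bytes) {
    if (_top == nullptr || std::size_t(_end - _top) < bytes) return nullptr;
    char* p = _top;
    _top += bytes;
    return p;
  }
  // Retire the lab (the real PLAB also fills the unused tail with a
  // dummy object so the heap stays parseable).
  void flush() { _top = _end = nullptr; }
private:
  char* _top = nullptr;
  char* _end = nullptr;
};

// The "direct or flush-and-refill" policy from Example 15.
void* allocate_for_copy(Plab& lab, SharedSpace& space,
                        std::size_t obj_bytes, std::size_t plab_bytes) {
  if (void* p = lab.allocate(obj_bytes)) return p;   // fast path
  if (obj_bytes > plab_bytes / 2) {
    return space.cas_allocate(obj_bytes);            // large: go direct
  }
  lab.flush();                                       // small: refill the lab
  void* base = space.cas_allocate(plab_bytes);
  if (base == nullptr) return nullptr;               // shared space is full
  lab.initialize(base, plab_bytes);
  return lab.allocate(obj_bytes);
}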