This page collects typical usage examples of the C++ method MemRegion::start. If you have been wondering what MemRegion::start does, how to use it, or where to find working examples, the curated snippets below should help. You can also browse further usage examples of the enclosing class MemRegion.
The 15 code examples of MemRegion::start shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
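Before the examples: all of them lean on a handful of MemRegion accessors. Below is a minimal, simplified sketch of that interface for reference (the real class lives in HotSpot's memRegion.hpp; MemRegionSketch and the plain typedef for HeapWord are illustrative stand-ins, not the actual declarations):

#include <cstddef>

typedef unsigned long HeapWord;   // stand-in for HotSpot's opaque HeapWord

class MemRegionSketch {
  HeapWord* _start;
  size_t    _word_size;
public:
  MemRegionSketch() : _start(0), _word_size(0) {}
  MemRegionSketch(HeapWord* start, HeapWord* end)
    : _start(start), _word_size(end - start) {}
  HeapWord* start() const { return _start; }              // first word of the region
  HeapWord* end()   const { return _start + _word_size; } // one past the last word
  HeapWord* last()  const { return end() - 1; }           // last word covered
  size_t word_size() const { return _word_size; }         // extent in heap words
  bool is_empty()    const { return _word_size == 0; }
  bool contains(const MemRegionSketch& mr) const {        // half-open containment
    return start() <= mr.start() && mr.end() <= end();
  }
};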
Example 1: younger_refs_in_space_iterate
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
  // Convert the assertion check to a warning if we are running
  // CMS+ParNew until related bug is fixed.
  MemRegion ur = sp->used_region();
  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
         err_msg("Did you forget to call save_marks()? "
                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                 urasm.start(), urasm.end(), ur.start(), ur.end()));
  // In the case of CMS+ParNew, issue a warning
  if (!ur.contains(urasm)) {
    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
    warning("CMS+ParNew: Did you forget to call save_marks()? "
            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
            "[" PTR_FORMAT ", " PTR_FORMAT ")",
            urasm.start(), urasm.end(), ur.start(), ur.end());
    MemRegion ur2 = sp->used_region();
    MemRegion urasm2 = sp->used_region_at_save_marks();
    if (!ur.equals(ur2)) {
      warning("CMS+ParNew: Flickering used_region()!!");
    }
    if (!urasm.equals(urasm2)) {
      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
    }
    ShouldNotReachHere();
  }
#endif
  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
}
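The assert in Example 1, stated in isolation: the region snapshot taken at the last save_marks() must lie inside the space's current used region (both are half-open intervals). A toy demonstration with the sketch class above, using hypothetical addresses:

#include <cassert>

int main() {
  HeapWord heap[64];
  MemRegionSketch ur(heap, heap + 48);     // used_region(): 48 words in use
  MemRegionSketch urasm(heap, heap + 32);  // snapshot from the last save_marks()
  assert(ur.contains(urasm));              // holds: the snapshot is a prefix of ur
  return 0;
}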
Example 2: oop_oop_iterate_elements_bounded
template <bool nv, typename OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_elements_specialized_bounded<nv, narrowOop>(a, closure, mr.start(), mr.end());
  } else {
    oop_oop_iterate_elements_specialized_bounded<nv, oop>(a, closure, mr.start(), mr.end());
  }
}
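The pattern in Example 2 hoists a runtime flag check (UseCompressedOops) out of the hot loop by selecting a compile-time template specialization once. A generic, hypothetical sketch of the same dispatch idiom, with stand-in reference types:

#include <cstdint>
#include <cstddef>

typedef uint32_t  narrowRef;   // stand-in for narrowOop (compressed, 32-bit)
typedef uintptr_t wideRef;     // stand-in for a full-width oop

// The element type is fixed at compile time, so the loop body is branch-free.
template <typename Ref>
size_t count_nonnull(const Ref* elems, size_t n) {
  size_t live = 0;
  for (size_t i = 0; i < n; i++) {
    if (elems[i] != 0) live++;
  }
  return live;
}

size_t count_nonnull_dispatch(const void* elems, size_t n, bool compressed) {
  // Branch once here, not per element, mirroring the UseCompressedOops split.
  return compressed ? count_nonnull((const narrowRef*)elems, n)
                    : count_nonnull((const wideRef*)elems, n);
}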
Example 3: fill_region_with_object
void SharedHeap::fill_region_with_object(MemRegion mr) {
  // Disable allocation events, since this isn't a "real" allocation.
  JVMPIAllocEventDisabler dis;
  size_t word_size = mr.word_size();
  size_t aligned_array_header_size =
    align_object_size(typeArrayOopDesc::header_size(T_INT));
  if (word_size >= aligned_array_header_size) {
    const size_t array_length =
      pointer_delta(mr.end(), mr.start()) -
      typeArrayOopDesc::header_size(T_INT);
    const size_t array_length_words =
      array_length * (HeapWordSize/sizeof(jint));
    post_allocation_setup_array(Universe::intArrayKlassObj(),
                                mr.start(),
                                mr.word_size(),
                                (int)array_length_words);
#ifdef ASSERT
    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
    Memory::set_words(elt_words, array_length, 0xDEAFBABE);
#endif
  } else {
    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
    post_allocation_setup_obj(SystemDictionary::object_klass(),
                              mr.start(),
                              mr.word_size());
  }
}
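To make the size arithmetic in Example 3 concrete, here is a hypothetical 64-bit walk-through. The 3-word header is an assumption for illustration only; the real value comes from typeArrayOopDesc::header_size(T_INT). Note that array_length is a payload size in heap words, while array_length_words is actually the jint element count:

#include <cstddef>
#include <cassert>

int main() {
  const size_t HeapWordSizeB = 8;   // bytes per heap word on 64-bit (assumed)
  const size_t jintSizeB     = 4;   // sizeof(jint)
  const size_t region_words  = 100; // mr.word_size()
  const size_t header_words  = 3;   // assumed typeArrayOopDesc::header_size(T_INT)
  size_t payload_words = region_words - header_words;                 // 97 words
  size_t jint_elements = payload_words * (HeapWordSizeB / jintSizeB); // 194 jints
  assert(payload_words == 97 && jint_elements == 194);
  return 0;
}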
Example 4: process_stride
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.
  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte* start_card = byte_for(used.start());
  jbyte* end_card = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;
  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }
  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism. (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte* chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);
    non_clean_card_iterate_work(chunk_mr, cl, clear);
    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
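Example 4's first-chunk computation is easier to see with numbers. A hypothetical run with four strides, where the first chunk of the used region happens to belong to stride 2 (the constants are illustrative, not HotSpot's actual values):

#include <cstdio>

int main() {
  const long CardsPerStrideChunk    = 256; // assumed chunk width, in cards
  const long n_strides              = 4;
  const long start_chunk_stride_num = 2;   // start_chunk % n_strides
  for (long stride = 0; stride < n_strides; stride++) {
    long chunks_ahead = (stride >= start_chunk_stride_num)
        ? stride - start_chunk_stride_num                // same chunk group
        : n_strides - start_chunk_stride_num + stride;   // wrap to next group
    // strides 2,3 start 0 and 256 cards in; strides 0,1 wrap to 512 and 768
    printf("stride %ld: first chunk begins %ld cards past start_card\n",
           stride, chunks_ahead * CardsPerStrideChunk);
  }
  return 0;
}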
Example 5: clear_MemRegion
void CardTableRS::clear_MemRegion(MemRegion mr) {
  jbyte* cur = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  assert(addr_for(cur) == mr.start(), "region must be card aligned");
  while (cur < last) {
    *cur = CardTableModRefBS::clean_card;
    cur++;
  }
}
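Example 5 walks one card byte at a time. The byte_for()/addr_for() mapping it relies on divides the heap into fixed 512-byte cards (HotSpot's default card size); a simplified, unbiased sketch of that mapping:

#include <cstdint>

const int card_shift = 9;               // log2(512): one byte per 512-byte card
static uint8_t card_table[1 << 20];     // toy table; the real one spans heap/512
static uintptr_t heap_base;             // assumed lowest heap address

// Simplified byte_for(): which card byte covers this heap address?
uint8_t* byte_for_sketch(uintptr_t addr) {
  return &card_table[(addr - heap_base) >> card_shift];
}

// Simplified addr_for(): first heap address covered by this card byte.
uintptr_t addr_for_sketch(const uint8_t* card) {
  return heap_base + ((uintptr_t)(card - card_table) << card_shift);
}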
Example 6: fill_region_with_block
// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
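The only difference between the two branches in Example 6 is the explicitly qualified call: obj.Base::f() selects the base-class implementation directly, bypassing whatever the derived class adds (here, per the comment, the _next_threshold update). The C++ mechanism in isolation, with hypothetical class names:

#include <cstdio>

struct BlockOffsetArrayish {
  virtual void alloc_block() { puts("base: update shared array only"); }
  virtual ~BlockOffsetArrayish() {}
};

struct ContigSpaceBOT : BlockOffsetArrayish {
  void alloc_block() { puts("derived: also advance _next_threshold"); }
};

int main() {
  ContigSpaceBOT bt;
  bt.alloc_block();                       // normal call: derived version runs
  bt.BlockOffsetArrayish::alloc_block();  // qualified call: base version only
  return 0;
}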
Example 7: verify_space
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();
  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry + 1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card)
        first_dirty++;
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
                "Illegal CT value");
      // If we're in the parallel case, the cur and prev values are
      // different, and we can't have left a prev in the table.
      guarantee(cur_youngergen_card_val() == youngergen_card
                || !is_prev_youngergen_card_val(*cur_entry),
                "Illegal CT value");
      cur_entry++;
    }
  }
}
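Condensed, the invariant Example 7 verifies is: a clean card may not cover any reference into the young generation, because such a store should have dirtied the card. A deliberately crude sketch of that check, which (unlike the real code, which walks object blocks and inspects only actual oop fields) treats every word on the card as a potential reference:

#include <cstdint>
#include <cstddef>

// Crude invariant check: no word on a clean card points into [young_lo, young_hi).
bool clean_card_has_no_young_refs(const uintptr_t* card_words, size_t n,
                                  uintptr_t young_lo, uintptr_t young_hi) {
  for (size_t i = 0; i < n; i++) {
    if (card_words[i] >= young_lo && card_words[i] < young_hi) {
      return false; // an old-to-young pointer on a clean card: barrier bug
    }
  }
  return true;
}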
Example 8: initialize
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {
  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");
  if (setup_pages && (UseNUMA || AlwaysPreTouch || UseColoredSpaces)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (UseColoredSpaces) {
        if (ColoredSpaceResizeRate > 0 && !AlwaysPreTouch) {
          const size_t change_size = head_size + tail_size;
          const float setup_rate_words = ColoredSpaceResizeRate >> LogBytesPerWord;
          head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                           head_size);
          tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                           tail_size);
        }
      } else {
        if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
          const size_t change_size = head_size + tail_size;
          const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
          head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                           head_size);
          tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                           tail_size);
        }
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
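The head/tail computation in Example 8 is plain interval arithmetic: only the parts of mr that fall outside the previously set-up region need page work. A worked example with hypothetical word addresses, where the space moved left:

#include <cstddef>
#include <cassert>

int main() {
  // last_setup_region() = [1000, 3000), new mr = [800, 2800) (word indices)
  size_t old_lo = 1000, old_hi = 3000;
  size_t new_lo = 800,  new_hi = 2800;
  size_t i_lo = (old_lo > new_lo) ? old_lo : new_lo;  // intersection starts at 1000
  size_t i_hi = (old_hi < new_hi) ? old_hi : new_hi;  // intersection ends at 2800
  size_t head_size = i_lo - new_lo;  // 200 words newly exposed on the left
  size_t tail_size = new_hi - i_hi;  // 0 words newly exposed on the right
  assert(head_size == 200 && tail_size == 0);
  return 0;
}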
Example 9: mangle_region
// Simply mangle the MemRegion mr.
void SpaceMangler::mangle_region(MemRegion mr) {
  assert(ZapUnusedHeapArea, "Mangling should not be in use");
#ifdef ASSERT
  if (TraceZapUnusedHeapArea) {
    gclog_or_tty->print("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")", p2i(mr.start()), p2i(mr.end()));
  }
  Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
  if (TraceZapUnusedHeapArea) {
    gclog_or_tty->print_cr(" done");
  }
#endif
}
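Copy::fill_to_words in Example 9 amounts to stamping every heap word in the region with a recognizable debug pattern (badHeapWord), so reads of unused heap stand out in a debugger. A plain approximation of that fill:

#include <cstdint>
#include <cstddef>

void fill_to_words_sketch(uintptr_t* start, size_t word_count, uintptr_t pattern) {
  for (size_t i = 0; i < word_count; i++) {
    start[i] = pattern;   // e.g. a badHeapWord-style value such as 0xBAADBABE
  }
}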
Example 10: G1BlockOffsetArray
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _csp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
Example 11: BlockOffsetArray
BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
Example 12: G1BlockOffsetArray
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _gsp(NULL) {
  assert(_bottom <= _end, "arguments out of order");
}
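Examples 10 through 12 construct block-offset tables over [mr.start(), mr.end()). The point of set_remainder_to_point_to_start is that every card entry lets a lookup hop back toward the start of the block covering it. The real encoding caps entries and chains logarithmic back-skips; a naive single-block sketch conveys just the back-pointing idea:

#include <cstddef>

// Naive BOT: entry c records how many cards back the covering block starts.
// (HotSpot instead caps entries at N_words and uses log-scale back-skips.)
void point_back_to_start_sketch(size_t* entries, size_t num_cards) {
  for (size_t c = 0; c < num_cards; c++) {
    entries[c] = c;                 // one block starting at card 0 in this sketch
  }
}

size_t covering_block_card(const size_t* entries, size_t card) {
  return card - entries[card];      // one hop back to the block's first card
}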
Example 13: _union
MemRegion MemRegion::_union(const MemRegion mr2) const {
  // If one region is empty, return the other
  if (is_empty()) return mr2;
  if (mr2.is_empty()) return MemRegion(start(), end());
  // Otherwise, regions must overlap or be adjacent
  assert(((start() <= mr2.start()) && (end() >= mr2.start())) ||
         ((mr2.start() <= start()) && (mr2.end() >= start())),
         "non-adjacent or overlapping regions");
  MemRegion res;
  HeapWord* res_start = MIN2(start(), mr2.start());
  HeapWord* res_end = MAX2(end(), mr2.end());
  res.set_start(res_start);
  res.set_end(res_end);
  return res;
}
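A quick usage sketch of Example 13's union with hypothetical addresses: the result spans from the smaller start to the larger end, which is why the assert first rules out disjoint, non-adjacent inputs (their "union" would otherwise swallow the gap between them).

#include <cassert>
#include <algorithm>

int main() {
  unsigned long heap[32];
  unsigned long* a_lo = heap + 0;  unsigned long* a_hi = heap + 10; // [0, 10)
  unsigned long* b_lo = heap + 5;  unsigned long* b_hi = heap + 20; // [5, 20)
  // Same computation as MemRegion::_union: MIN2 of starts, MAX2 of ends.
  unsigned long* u_lo = std::min(a_lo, b_lo);
  unsigned long* u_hi = std::max(a_hi, b_hi);
  assert(u_lo == heap + 0 && u_hi == heap + 20);                    // [0, 20)
  return 0;
}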
Example 14: initialize
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");
  HeapWord* bottom = lab.start();
  HeapWord* end = lab.end();
  set_bottom(bottom);
  set_end(end);
  set_top(bottom);
  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Memory::set_words(top(), free()/HeapWordSize, badHeapWord));
    }
    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);
    _state = needs_flush;
  } else {
    _state = zero_size;
  }
  assert(this->top() <= this->end(), "pointers out of order");
}
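The arithmetic behind Example 14's end adjustment, with hypothetical sizes (filler_header_size is in words; its real value depends on the platform's filler object layout):

#include <cstddef>
#include <cassert>

int main() {
  size_t lab_words          = 100; // lab.word_size()
  size_t filler_header_size = 3;   // assumed value, in heap words
  assert(lab_words >= filler_header_size);   // the "lab is too small" check
  size_t usable_words = lab_words - filler_header_size;
  // set_end() is pulled back by the header size, so a final flush can
  // always stamp a filler object over whatever tail remains unused.
  assert(usable_words == 97);
  return 0;
}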
Example 15: process_chunk_boundaries
void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t lowest_non_clean_chunk_size)
{
  // We must worry about non-array objects that cross chunk boundaries,
  // because such objects are both precisely and imprecisely marked:
  // .. if the head of such an object is dirty, the entire object
  //    needs to be scanned, under the interpretation that this
  //    was an imprecise mark
  // .. if the head of such an object is not dirty, we can assume
  //    precise marking and it's efficient to scan just the dirty
  //    cards.
  // In either case, each scanned reference must be scanned precisely
  // once so as to avoid cloning of a young referent. For efficiency,
  // our closures depend on this property and do not protect against
  // double scans.
  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;
  NOISY(tty->print_cr("===========================================================================");)