This page collects typical C++ usage examples of the MemRegion type. If you are unsure what MemRegion is for or how it is used, the selected examples below may help; they are drawn from the HotSpot VM's garbage-collector sources.
In total, 15 MemRegion code examples are shown below, sorted by popularity by default.
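Before diving into the examples, the following standalone sketch illustrates the kind of value type MemRegion is: a [start, end) span of heap words with constructors taking either an end pointer or a word count, plus start()/end()/word_size()/intersection()-style accessors that the examples below rely on. The names SimpleMemRegion and HeapWordStandIn are illustrative assumptions for this sketch only; they are not the actual HotSpot declarations.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for HotSpot's HeapWord: one machine word of heap memory.
typedef size_t HeapWordStandIn;

// Simplified value type mirroring the two constructor forms seen below:
// MemRegion(start, end) and MemRegion(start, word_size).
class SimpleMemRegion {
  HeapWordStandIn* _start;
  size_t           _word_size;
public:
  SimpleMemRegion() : _start(NULL), _word_size(0) {}
  SimpleMemRegion(HeapWordStandIn* start, HeapWordStandIn* end)
    : _start(start), _word_size(end - start) {}
  SimpleMemRegion(HeapWordStandIn* start, size_t word_size)
    : _start(start), _word_size(word_size) {}

  HeapWordStandIn* start() const     { return _start; }
  HeapWordStandIn* end() const       { return _start + _word_size; }
  size_t           word_size() const { return _word_size; }
  bool             is_empty() const  { return _word_size == 0; }

  bool contains(const SimpleMemRegion& other) const {
    return other.start() >= start() && other.end() <= end();
  }
  // Overlapping part of the two regions (empty if they do not overlap).
  SimpleMemRegion intersection(const SimpleMemRegion& other) const {
    HeapWordStandIn* s = std::max(start(), other.start());
    HeapWordStandIn* e = std::min(end(), other.end());
    return (s < e) ? SimpleMemRegion(s, e) : SimpleMemRegion();
  }
};

int main() {
  HeapWordStandIn heap[64];                    // a pretend 64-word heap
  SimpleMemRegion whole(heap, heap + 64);      // start/end form
  SimpleMemRegion tail(heap + 48, (size_t)16); // start/word-count form
  std::printf("whole=%zu words, tail=%zu words, overlap=%zu words\n",
              whole.word_size(), tail.word_size(),
              whole.intersection(tail).word_size());
  return 0;
}

The real class in the HotSpot sources additionally offers byte-based accessors and further region arithmetic; the examples that follow show it used with block-offset tables, card tables, mark bitmaps, and NUMA page setup.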
Example 1: ParGCAllocBufferWithBOT::allocate_slow
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top = _retained_filler.end();
    _hard_end = next_hard_end;
    _end = _hard_end - AlignmentReserve;
    res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}
Example 2: HeapRegionSeq::shrink_by
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    cur->reset_zero_fill();
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
Example 3: MutableSpace::initialize
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {
  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");
  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");
    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }
    if (AlwaysPreTouch) {
      pretouch_pages(head);
      pretouch_pages(tail);
    }
    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }
Example 4: ParGCAllocBufferWithBOT::undo_allocation
void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}
Example 5: ParGCAllocBufferWithBOT constructor
ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}
Example 6: G1ArchiveAllocator::alloc_new_region
bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;
  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();
  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
Example 7: BarrierSet::write_ref_array
// count is number of array elements being written
void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(count <= (size_t)max_intx, "count too large");
  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  // In the case of compressed oops, start and end may potentially be misaligned;
  // so we need to conservatively align the first downward (this is not
  // strictly necessary for current uses, but a case of good hygiene and,
  // if you will, aesthetics) and the second upward (this is essential for
  // current uses) to a HeapWord boundary, so we mark all cards overlapping
  // this write. If this evolves in the future to calling a
  // logging barrier of narrow oop granularity, like the pre-barrier for G1
  // (mentioned here merely by way of example), we will need to change this
  // interface, so it is "exactly precise" (if i may be allowed the adverbial
  // redundancy for emphasis) and does not include narrow oop slots not
  // included in the original write interval.
  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
  // If compressed oops were not being used, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
#if 0
  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
          start, count, aligned_start, aligned_end);
#endif
  write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
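The conservative alignment that write_ref_array performs can be reduced to two bit-mask operations: round the start of the written byte range down and its end up to the word size, so the resulting region covers every narrow-oop slot that was touched. The helper names align_down_to_word/align_up_to_word below are illustrative, not HotSpot's align_size_down/align_size_up; this is only a sketch of the arithmetic.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <stdint.h>

// Round down / up to a power-of-two word size (word_bytes must be a power of two).
static uintptr_t align_down_to_word(uintptr_t addr, uintptr_t word_bytes) {
  return addr & ~(word_bytes - 1);
}
static uintptr_t align_up_to_word(uintptr_t addr, uintptr_t word_bytes) {
  return (addr + word_bytes - 1) & ~(word_bytes - 1);
}

int main() {
  const uintptr_t word_bytes = 8;   // e.g. a 64-bit heap word
  uintptr_t start = 0x1004;         // misaligned, as with 4-byte narrow oops
  uintptr_t end   = 0x101c;
  uintptr_t aligned_start = align_down_to_word(start, word_bytes);
  uintptr_t aligned_end   = align_up_to_word(end, word_bytes);
  // The aligned range must enclose the original write interval.
  assert(aligned_start <= start && end <= aligned_end);
  std::printf("[0x%zx, 0x%zx) -> [0x%zx, 0x%zx)\n",
              (size_t)start, (size_t)end, (size_t)aligned_start, (size_t)aligned_end);
  return 0;
}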
Example 8: TenuredGeneration constructor
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  CardGeneration(rs, initial_byte_size, level, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;
  _gc_stats = new GCStats();
  // initialize performance counters
  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
                                         gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
  _gc_counters = new CollectorCounters("MSC", 1);
  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}
Example 9: CardTableModRefBS::process_stride
void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               bool clear,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.
  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte* start_card = byte_for(used.start());
  jbyte* end_card = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;
  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }
  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism. (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte* chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                   chunk_card_end >= end_card ?
                                   used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);
    non_clean_card_iterate_work(chunk_mr, cl, clear);
    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
Example 10: G1PrepareCompactClosure::prepare_for_compaction_work
void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}
Example 11: MutableNUMASpace::ensure_parsability
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                   touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If object header crossed a small page boundary we mark the area
              // as invalid rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }
            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}
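The inner while-loop above caps each filler object at CollectedHeap::filler_array_max_size() and walks the unallocated gap in chunks. A minimal sketch of that chunking arithmetic follows; max_filler_words, min_fill_words and fill_gap are hypothetical stand-ins for this illustration, not the CollectedHeap API.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical limits standing in for the collector's filler sizing.
static const size_t max_filler_words = 128;  // largest single filler object
static const size_t min_fill_words   = 2;    // smallest gap worth filling

// Split a gap of 'gap_words' words into filler-sized chunks,
// mirroring the words_left_to_fill loop above.
static void fill_gap(size_t gap_words) {
  size_t left = gap_words;
  while (left > 0) {
    size_t chunk = std::min(left, max_filler_words);
    assert(chunk >= min_fill_words && "chunk too small to describe as a filler object");
    std::printf("fill %zu words with a dead (filler) object\n", chunk);
    left -= chunk;
  }
}

int main() {
  fill_gap(300);   // fills 128 + 128 + 44 words
  return 0;
}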
Example 12: cas_allocate_noexpand
// Support for MT garbage collection. CAS allocation is lower overhead than grabbing
// and releasing the heap lock, which is held during gc's anyway. This method is not
// safe for use at the same time as allocate_noexpand()!
HeapWord* cas_allocate_noexpand(size_t word_size) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
  HeapWord* res = object_space()->cas_allocate(word_size);
  if (res != NULL) {
    DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
    _start_array.allocate_block(res);
  }
  return res;
}
Example 13: HeapRegion::par_clear
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
Example 14: CMSBitMap::par_markRange
inline void CMSBitMap::par_markRange(MemRegion mr) {
  assert_locked();
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  size_t end_ofs   = heapWordToOffset(mr.end());
  // Range size is usually just 1 bit.
  _bm.par_set_range(start_ofs, end_ofs, BitMap::small_range);
}
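The offset conversion in par_markRange is a simple subtract-and-shift: the distance from the bitmap's base word, divided by the number of words each bit covers. The ToyBitMap type below sketches that mapping with a plain std::vector<bool>; its field and method names are assumptions for this illustration, not CMSBitMap's real interface.

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified address-to-bit-offset mapping: each bit covers (1 << shifter)
// heap words starting at bm_start_word (here words are modeled as indices).
struct ToyBitMap {
  size_t bm_start_word;          // first heap word covered by the map
  unsigned shifter;              // log2(words per bit); 0 = one bit per word
  std::vector<bool> bits;

  size_t heap_word_to_offset(size_t word) const {
    return (word - bm_start_word) >> shifter;
  }
  // Mark the word range [start_word, end_word), as par_set_range does
  // on the offset range computed above.
  void mark_range(size_t start_word, size_t end_word) {
    for (size_t i = heap_word_to_offset(start_word);
         i < heap_word_to_offset(end_word); ++i) {
      bits[i] = true;
    }
  }
};

int main() {
  ToyBitMap bm;
  bm.bm_start_word = 1000;
  bm.shifter = 0;
  bm.bits.assign(64, false);
  bm.mark_range(1010, 1013);     // sets bits 10, 11 and 12
  std::printf("bit 10=%d, bit 12=%d, bit 13=%d\n",
              (int)bm.bits[10], (int)bm.bits[12], (int)bm.bits[13]);
  return 0;
}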
Example 15: allocate_noexpand
HeapWord* allocate_noexpand(size_t word_size) {
  // We assume the heap lock is held here.
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = object_space()->allocate(word_size);
  if (res != NULL) {
    DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
    _start_array.allocate_block(res);
  }
  return res;
}