This article collects typical usage examples of the C++ method MemRegion::last. If you have been wondering what exactly MemRegion::last does, how to call it, or what real-world uses of it look like, the curated code examples below should help. You can also explore further usage examples of MemRegion, the class this method belongs to.
13 code examples of MemRegion::last are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
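A note on the convention every example relies on: a MemRegion is a half-open interval [start(), end()), and last() returns the address of the final word inside the region, one word before end(). That is why the card-table code below consistently pairs byte_for(mr.start()) with byte_after(mr.last()) to obtain an exclusive limit covering exactly the cards the region touches. The following self-contained sketch (toy stand-ins, not the real HotSpot types) illustrates the arithmetic:

#include <cassert>
#include <cstdio>
#include <cstdint>

// Toy stand-ins for the HotSpot types -- illustration only, not the real API.
typedef uintptr_t HeapWord;        // pretend an address is just an integer
const uintptr_t card_size = 512;   // bytes per card (HotSpot's default)

struct ToyMemRegion {
  HeapWord _start, _end;                        // half-open: [_start, _end)
  HeapWord start() const { return _start; }
  HeapWord end()   const { return _end; }
  HeapWord last()  const { return _end - 1; }   // last address *inside* mr
};

// In this toy model, cards are identified by index rather than by pointer.
uintptr_t byte_for(HeapWord addr)   { return addr / card_size; }
uintptr_t byte_after(HeapWord addr) { return addr / card_size + 1; }

int main() {
  ToyMemRegion mr = {1000, 1300};            // a region that ends mid-card
  uintptr_t first = byte_for(mr.start());    // card 1 (covers 512..1023)
  uintptr_t limit = byte_after(mr.last());   // card 3, one past card 2
  // byte_for(mr.end()) would yield card 2, so a loop bounded by it would
  // miss the final card whenever end() is not card aligned -- hence the
  // byte_after(mr.last()) idiom used throughout the examples below.
  assert(limit - first == 2);
  printf("cards [%lu, %lu) cover the region\n",
         (unsigned long)first, (unsigned long)limit);
  return 0;
}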
Example 1: while
void
CardTableModRefBS::
process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size) {
// We don't have to go downwards here; it wouldn't help anyway,
// because of parallelism.
// Find the first card address of the first chunk in the stride that is
// at least "bottom" of the used region.
jbyte* start_card = byte_for(used.start());
jbyte* end_card = byte_after(used.last());
uintptr_t start_chunk = addr_to_chunk_index(used.start());
uintptr_t start_chunk_stride_num = start_chunk % n_strides;
jbyte* chunk_card_start;
if ((uintptr_t)stride >= start_chunk_stride_num) {
chunk_card_start = (jbyte*)(start_card +
(stride - start_chunk_stride_num) *
CardsPerStrideChunk);
} else {
// Go ahead to the next chunk group boundary, then to the requested stride.
chunk_card_start = (jbyte*)(start_card +
(n_strides - start_chunk_stride_num + stride) *
CardsPerStrideChunk);
}
while (chunk_card_start < end_card) {
// We don't have to go downwards here; it wouldn't help anyway,
// because of parallelism. (We take care with "min_done"; see below.)
// Invariant: chunk_mr should be fully contained within the "used" region.
jbyte* chunk_card_end = chunk_card_start + CardsPerStrideChunk;
MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
chunk_card_end >= end_card ?
used.end() : addr_for(chunk_card_end));
assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
// Process the chunk.
process_chunk_boundaries(sp,
dcto_cl,
chunk_mr,
used,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
non_clean_card_iterate_work(chunk_mr, cl, clear);
// Find the next chunk of the stride.
chunk_card_start += CardsPerStrideChunk * n_strides;
}
}
Example 2: clear_MemRegion
void CardTableRS::clear_MemRegion(MemRegion mr) {
jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
assert(addr_for(cur) == mr.start(), "region must be card aligned");
while (cur < last) {
*cur = CardTableModRefBS::clean_card;
cur++;
}
}
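The same pattern in miniature, runnable on its own: a plain byte array stands in for the card table, and the card values (clean_card == -1, dirty_card == 0) are assumptions modeled on CardTableModRefBS's constants:

#include <cassert>
#include <cstdint>

const int card_shift = 9;            // 512-byte cards, HotSpot's default
const signed char clean_card = -1;   // assumed values, modeled on HotSpot
const signed char dirty_card = 0;

signed char card_table[1024];        // covers a 512 KiB toy "heap"

// Clear every card intersecting the half-open address range [start, end).
void clear_region(uintptr_t start, uintptr_t end) {
  assert(start < end);
  uintptr_t cur   = start >> card_shift;            // byte_for(start)
  uintptr_t limit = ((end - 1) >> card_shift) + 1;  // byte_after(last)
  while (cur < limit) {
    card_table[cur++] = clean_card;
  }
}

int main() {
  card_table[2] = dirty_card;
  card_table[3] = dirty_card;
  clear_region(2 * 512 + 10, 4 * 512 - 1);   // touches cards 2 and 3
  assert(card_table[2] == clean_card && card_table[3] == clean_card);
  return 0;
}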
Example 3: verify_space
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
// We don't need to do young-gen spaces.
if (s->end() <= gen_boundary) return;
MemRegion used = s->used_region();
jbyte* cur_entry = byte_for(used.start());
jbyte* limit = byte_after(used.last());
while (cur_entry < limit) {
if (*cur_entry == CardTableModRefBS::clean_card) {
jbyte* first_dirty = cur_entry+1;
while (first_dirty < limit &&
*first_dirty == CardTableModRefBS::clean_card)
first_dirty++;
// If the first object is a regular object, and it has a
// young-to-old field, that would mark the previous card.
HeapWord* boundary = addr_for(cur_entry);
HeapWord* end = addr_for(first_dirty);
HeapWord* boundary_block = s->block_start(boundary);
HeapWord* begin = boundary; // Until proven otherwise.
HeapWord* start_block = boundary_block; // Until proven otherwise.
if (boundary_block < boundary) {
if (s->block_is_obj(boundary_block)) {
oop boundary_obj = oop(boundary_block);
if (!boundary_obj->is_objArray() &&
!boundary_obj->is_typeArray()) {
guarantee(cur_entry > byte_for(used.start()),
"else boundary would be boundary_block");
if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
begin = boundary_block + s->block_size(boundary_block);
start_block = begin;
}
}
}
}
// Now traverse objects until end.
HeapWord* cur = start_block;
VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
while (cur < end) {
if (s->block_is_obj(cur)) {
oop(cur)->oop_iterate(&verify_blk);
}
cur += s->block_size(cur);
}
cur_entry = first_dirty;
} else {
guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
"Illegal CT value");
// If we're in the parallel case, the cur and prev values are
// different, and we can't have left a prev in the table.
guarantee(cur_youngergen_card_val() == youngergen_card
|| !is_prev_youngergen_card_val(*cur_entry),
"Illegal CT value");
cur_entry++;
}
}
}
Example 4: memset
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
// Below we may use an explicit loop instead of memset() because on
// certain platforms memset() can give concurrent readers phantom zeros.
if (UseMemSetInBOT) {
memset(first, g1_young_gen, last - first);
} else {
for (jbyte* i = first; i < last; i++) {
*i = g1_young_gen;
}
}
}
Example 5: non_clean_card_iterate_parallel_work
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
int n_threads) {
assert(n_threads > 0, "Error: expected n_threads > 0");
assert((n_threads == 1 && ParallelGCThreads == 0) ||
n_threads <= (int)ParallelGCThreads,
"# worker threads != # requested!");
assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
assert(UseDynamicNumberOfGCThreads ||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
n_threads == (int)ParallelGCThreads,
"# worker threads != # requested!");
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;
size_t lowest_non_clean_chunk_size;
get_LNC_array_for_space(sp, lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
uint n_strides = n_threads * ParGCStridesPerThread;
SequentialSubTasksDone* pst = sp->par_seq_tasks();
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
pst->set_n_threads(n_threads);
pst->set_n_tasks(n_strides);
uint stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides, cl, ct,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
}
if (pst->all_tasks_completed()) {
// Clear lowest_non_clean array for next time.
intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
intptr_t ind = ch - lowest_non_clean_base_chunk_index;
assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
"Bounds error");
lowest_non_clean[ind] = NULL;
}
}
}
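The stride machinery above hands out chunks of cards to workers round-robin: chunk i belongs to stride i % n_strides. The standalone sketch below (simplified stand-ins: plain card indices instead of card-table pointers, and a chunk-aligned start) mirrors the first-chunk computation and the stepping used by process_stride():

#include <cstdio>
#include <cstdint>

const uintptr_t cards_per_chunk = 256;   // stand-in for CardsPerStrideChunk

// Print the card ranges that `stride` (of `n_strides`) scans within the
// card range [start_card, end_card): advance to the first chunk whose
// index modulo n_strides equals the stride, then step by n_strides chunks.
void chunks_for_stride(uintptr_t start_card, uintptr_t end_card,
                       unsigned stride, unsigned n_strides) {
  uintptr_t start_chunk = start_card / cards_per_chunk;
  unsigned  start_chunk_stride_num = (unsigned)(start_chunk % n_strides);
  uintptr_t chunk_start;
  if (stride >= start_chunk_stride_num) {
    chunk_start = start_card +
                  (stride - start_chunk_stride_num) * cards_per_chunk;
  } else {
    // Jump to the next chunk-group boundary, then to the requested stride.
    chunk_start = start_card +
                  (n_strides - start_chunk_stride_num + stride) * cards_per_chunk;
  }
  for (uintptr_t c = chunk_start; c < end_card;
       c += cards_per_chunk * n_strides) {
    printf("stride %u scans cards [%lu, %lu)\n", stride,
           (unsigned long)c, (unsigned long)(c + cards_per_chunk));
  }
}

int main() {
  // 4 strides over cards [0, 2048): stride 1 owns chunks 1 and 5,
  // i.e. cards [256, 512) and [1280, 1536).
  chunks_for_stride(0, 2048, 1, 4);
  return 0;
}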
Example 6: verify_aligned_region_empty
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
if (!mr.is_empty()) {
jbyte* cur_entry = byte_for(mr.start());
jbyte* limit = byte_after(mr.last());
// The region mr may not start on a card boundary so
// the first card may reflect a write to the space
// just prior to mr.
if (!is_aligned(mr.start())) {
cur_entry++;
}
for (;cur_entry < limit; cur_entry++) {
guarantee(*cur_entry == CardTableModRefBS::clean_card,
"Unexpected dirty card found");
}
}
}
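Because mr need not start on a card boundary, the first card can legitimately reflect a write to memory just before mr, so the check above skips it. A miniature of that adjustment (hypothetical helper, same 512-byte cards as elsewhere on this page):

#include <cassert>
#include <cstdint>

const int card_shift = 9;   // 512-byte cards

// First card index to verify for the half-open range [start, end):
// skip the first card when start is not card aligned, since that card
// also covers memory just before the region.
uintptr_t first_card_to_verify(uintptr_t start) {
  uintptr_t card = start >> card_shift;
  bool aligned = (start & ((1u << card_shift) - 1)) == 0;
  return aligned ? card : card + 1;
}

int main() {
  assert(first_card_to_verify(1024) == 2);  // aligned: verify card 2 itself
  assert(first_card_to_verify(1030) == 3);  // unaligned: skip card 2
  return 0;
}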
Example 7: assert
void CardTableModRefBSForCTRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads) {
assert(n_threads > 0, "expected n_threads > 0");
assert(n_threads <= ParallelGCThreads,
err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;
size_t lowest_non_clean_chunk_size;
get_LNC_array_for_space(sp, lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
uint n_strides = n_threads * ParGCStridesPerThread;
SequentialSubTasksDone* pst = sp->par_seq_tasks();
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
pst->set_n_threads(n_threads);
pst->set_n_tasks(n_strides);
uint stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides,
cl, ct,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
}
if (pst->all_tasks_completed()) {
// Clear lowest_non_clean array for next time.
intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
intptr_t ind = ch - lowest_non_clean_base_chunk_index;
assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
"Bounds error");
lowest_non_clean[ind] = NULL;
}
}
}
Example 8: x
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
volatile jbyte* byte = byte_for(mr.start());
jbyte* last_byte = byte_for(mr.last());
Thread* thr = Thread::current();
if (whole_heap) {
while (byte <= last_byte) {
*byte = dirty_card;
byte++;
}
} else {
// skip all consecutive young cards
for (; byte <= last_byte && *byte == g1_young_gen; byte++);
if (byte <= last_byte) {
OrderAccess::storeload();
// Enqueue if necessary.
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
jt->dirty_card_queue().enqueue(byte);
}
}
} else {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
_dcqs.shared_dirty_card_queue()->enqueue(byte);
}
}
}
}
}
}
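A runnable miniature of the non-whole-heap branch above: young cards are skipped entirely (young regions are scanned in full during evacuation, so their cards never need refinement), and only cards that were not already dirty get "enqueued". The card values are assumptions modeled on the G1 constants, and printf stands in for the dirty-card-queue enqueue:

#include <cstdio>

const signed char clean_card   = -1;   // assumed card values, modeled on
const signed char dirty_card   = 0;    // the G1 card table constants
const signed char g1_young_gen = 32;   // assumed young-region marker

signed char cards[8] = { g1_young_gen, g1_young_gen, clean_card, dirty_card,
                         clean_card, g1_young_gen, clean_card, clean_card };

int main() {
  int first = 0, last = 7;   // inclusive card range, like byte..last_byte
  int i = first;
  // Skip the leading run of consecutive young cards.
  while (i <= last && cards[i] == g1_young_gen) i++;
  for (; i <= last; i++) {
    if (cards[i] == g1_young_gen) continue;   // never dirty young cards
    if (cards[i] != dirty_card) {
      cards[i] = dirty_card;
      printf("enqueued card %d for refinement\n", i); // stands in for enqueue()
    }
  }
  return 0;
}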
Example 9: allocate_containing_regions
bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
size_t commits = 0;
uint start_index = (uint)_regions.get_index_by_address(range.start());
uint last_index = (uint)_regions.get_index_by_address(range.last());
// Ensure that each G1 region in the range is free, returning false if not.
// Commit those that are not yet available, and keep count.
for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
if (!is_available(curr_index)) {
commits++;
expand_at(curr_index, 1);
}
HeapRegion* curr_region = _regions.get_by_index(curr_index);
if (!curr_region->is_free()) {
return false;
}
}
allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
*commit_count = commits;
return true;
}
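Note the use of range.last() rather than range.end() for last_index: end() is exclusive, so when a range stops exactly on a region boundary, get_index_by_address(range.end()) would name the next region and the loop would commit and inspect one region too many. A minimal sketch of that index arithmetic, with an assumed region size:

#include <cassert>
#include <cstdint>

const uintptr_t region_size = 1u << 20;   // assumed 1 MiB heap regions

uintptr_t index_of(uintptr_t addr) { return addr / region_size; }

int main() {
  uintptr_t start = 3 * region_size;
  uintptr_t end   = 5 * region_size;   // exclusive: covers regions 3 and 4
  assert(index_of(start)   == 3);
  assert(index_of(end - 1) == 4);      // last(): the correct inclusive index
  assert(index_of(end)     == 5);      // end(): would drag in region 5
  return 0;
}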
Example 10: do_MemRegion
void do_MemRegion(MemRegion mr) {
HeapWord* end_of_non_clean = mr.end();
HeapWord* start_of_non_clean = end_of_non_clean;
jbyte* entry = _ct->byte_for(mr.last());
HeapWord* cur = _ct->addr_for(entry);
while (mr.contains(cur)) {
jbyte entry_val = *entry;
if (!clear_card(entry)) {
if (start_of_non_clean < end_of_non_clean) {
MemRegion mr2(start_of_non_clean, end_of_non_clean);
_dirty_card_closure->do_MemRegion(mr2);
}
end_of_non_clean = cur;
start_of_non_clean = end_of_non_clean;
}
entry--;
start_of_non_clean = cur;
cur = _ct->addr_for(entry);
}
if (start_of_non_clean < end_of_non_clean) {
MemRegion mr2(start_of_non_clean, end_of_non_clean);
_dirty_card_closure->do_MemRegion(mr2);
}
}
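The closure above walks the cards backwards from the one containing mr.last() and batches maximal runs of non-clean cards into a single do_MemRegion call. The standalone sketch below reproduces just that batching logic over an int array, where a zero plays the role of a card that clear_card() declines to process:

#include <cstdio>

// 1 = non-clean (to be processed), 0 = clean (ends the current run).
int cards[10] = {0, 1, 1, 0, 1, 1, 1, 0, 0, 1};

int main() {
  int run_end = 10;              // exclusive end of the current non-clean run
  for (int i = 9; i >= 0; i--) {
    if (cards[i] == 0) {         // clean card: emit the run gathered so far
      if (i + 1 < run_end) {
        printf("process cards [%d, %d)\n", i + 1, run_end);
      }
      run_end = i;               // the next run ends below this clean card
    }
  }
  if (run_end > 0) {             // leftmost run reaches the start of the range
    printf("process cards [0, %d)\n", run_end);
  }
  return 0;
}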
Example 11: assert
void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size)
{
// We must worry about the chunk boundaries.
// First, set our max_to_do:
HeapWord* max_to_do = NULL;
uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;
if (chunk_mr.end() < used.end()) {
// This is not the last chunk in the used region. What is the last
// object?
HeapWord* last_block = sp->block_start(chunk_mr.end());
assert(last_block <= chunk_mr.end(), "In case this property changes.");
if (last_block == chunk_mr.end()
|| !sp->block_is_obj(last_block)) {
max_to_do = chunk_mr.end();
} else {
// It is an object and starts before the end of the current chunk.
// last_obj_card is the card corresponding to the start of the last object
// in the chunk. Note that the last object may not start in
// the chunk.
jbyte* last_obj_card = byte_for(last_block);
if (!card_may_have_been_dirty(*last_obj_card)) {
// The card containing the head is not dirty. Any marks in
// subsequent cards still in this chunk must have been made
// precisely; we can cap processing at the end.
max_to_do = chunk_mr.end();
} else {
// The last object must be considered dirty, and extends onto the
// following chunk. Look for a dirty card in that chunk that will
// bound our processing.
jbyte* limit_card = NULL;
size_t last_block_size = sp->block_size(last_block);
jbyte* last_card_of_last_obj =
byte_for(last_block + last_block_size - 1);
jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
// This search potentially goes a long distance looking
// for the next card that will be scanned. For example,
// an object that is an array of primitives will not
// have any cards covering regions interior to the array
// that will need to be scanned. The scan can be terminated
// at the last card of the next chunk. That would leave
// limit_card as NULL and would result in "max_to_do"
// being set with the LNC value or with the end
// of the last block.
jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
CardsPerStrideChunk;
assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
== CardsPerStrideChunk, "last card of next chunk may be wrong");
jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
last_card_of_next_chunk);
for (jbyte* cur = first_card_of_next_chunk;
cur <= last_card_to_check; cur++) {
if (card_will_be_scanned(*cur)) {
limit_card = cur; break;
}
}
assert(0 <= cur_chunk_index+1 &&
cur_chunk_index+1 < lowest_non_clean_chunk_size,
"Bounds error.");
// LNC for the next chunk
jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
if (limit_card == NULL) {
limit_card = lnc_card;
}
if (limit_card != NULL) {
if (lnc_card != NULL) {
limit_card = (jbyte*)MIN2((intptr_t)limit_card,
(intptr_t)lnc_card);
}
max_to_do = addr_for(limit_card);
} else {
max_to_do = last_block + last_block_size;
}
}
}
assert(max_to_do != NULL, "OOPS!");
} else {
max_to_do = used.end();
}
// Now we can set the closure we're using so it doesn't go beyond
// max_to_do.
dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
dcto_cl->set_last_bottom(max_to_do);
#endif
// Now we set *our" lowest_non_clean entry.
// Find the object that spans our boundary, if one exists.
// Nothing to do on the first chunk.
//......... the rest of this method is omitted here .........
Example 12: verify_space
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
// We don't need to do young-gen spaces.
if (s->end() <= gen_boundary) return;
MemRegion used = s->used_region();
jbyte* cur_entry = byte_for(used.start());
jbyte* limit = byte_after(used.last());
while (cur_entry < limit) {
if (*cur_entry == CardTableModRefBS::clean_card) {
jbyte* first_dirty = cur_entry+1;
while (first_dirty < limit &&
*first_dirty == CardTableModRefBS::clean_card) {
first_dirty++;
}
// If the first object is a regular object, and it has a
// young-to-old field, that would mark the previous card.
HeapWord* boundary = addr_for(cur_entry);
HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
HeapWord* boundary_block = s->block_start(boundary);
HeapWord* begin = boundary; // Until proven otherwise.
HeapWord* start_block = boundary_block; // Until proven otherwise.
if (boundary_block < boundary) {
if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
oop boundary_obj = oop(boundary_block);
if (!boundary_obj->is_objArray() &&
!boundary_obj->is_typeArray()) {
guarantee(cur_entry > byte_for(used.start()),
"else boundary would be boundary_block");
if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
begin = boundary_block + s->block_size(boundary_block);
start_block = begin;
}
}
}
}
// Now traverse objects until end.
if (begin < end) {
MemRegion mr(begin, end);
VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
oop(cur)->oop_iterate(&verify_blk, mr);
}
}
}
cur_entry = first_dirty;
} else {
// We'd normally expect that cur_youngergen_and_prev_nonclean_card
// is a transient value, that cannot be in the card table
// except during GC, and thus assert that:
// guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
// "Illegal CT value");
// That however, need not hold, as will become clear in the
// following...
// We'd normally expect that if we are in the parallel case,
// we can't have left a prev value (which would be different
// from the current value) in the card table, and so we'd like to
// assert that:
// guarantee(cur_youngergen_card_val() == youngergen_card
// || !is_prev_youngergen_card_val(*cur_entry),
// "Illegal CT value");
// That, however, may not hold occasionally, because of
// CMS or MSC in the old gen. To wit, consider the
// following two simple illustrative scenarios:
// (a) CMS: Consider the case where a large object L
// spanning several cards is allocated in the old
// gen, and has a young gen reference stored in it, dirtying
// some interior cards. A young collection scans the card,
// finds a young ref and installs a youngergenP_n value.
// L then goes dead. Now a CMS collection starts,
// finds L dead and sweeps it up. Assume that L is
// abutting _unallocated_blk, so _unallocated_blk is
// adjusted down to (below) L. Assume further that
// no young collection intervenes during this CMS cycle.
// The next young gen cycle will not get to look at this
// youngergenP_n card since it lies in the unoccupied
// part of the space.
// Some young collections later the blocks on this
// card can be re-allocated either due to direct allocation
// or due to absorbing promotions. At this time, the
// before-gc verification will fail the above assert.
// (b) MSC: In this case, an object L with a young reference
// is on a card that (therefore) holds a youngergen_n value.
// Suppose also that L lies towards the end of the used
// space before GC. An MSC collection
// occurs that compacts to such an extent that this
// card is no longer in the occupied part of the space.
// Since current code in MSC does not always clear cards
// in the unused part of old gen, this stale youngergen_n
// value is left behind and can later be covered by
// an object when promotion or direct allocation
// re-allocates that part of the heap.
//
// Fortunately, the presence of such stale card values is
// "only" a minor annoyance in that subsequent young collections
// might needlessly scan such cards, but would still never corrupt
// the heap as a result. However, it's likely not to be a significant
// performance inhibitor in practice. For instance,
// some recent measurements with unoccupied cards eagerly cleared
//......... the rest of this method is omitted here .........
Example 13: clear_cl
void
CardTableModRefBS::
process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size) {
// We go from higher to lower addresses here; it wouldn't help that much
// because of the strided parallelism pattern used here.
// Find the first card address of the first chunk in the stride that is
// at least "bottom" of the used region.
jbyte* start_card = byte_for(used.start());
jbyte* end_card = byte_after(used.last());
uintptr_t start_chunk = addr_to_chunk_index(used.start());
uintptr_t start_chunk_stride_num = start_chunk % n_strides;
jbyte* chunk_card_start;
if ((uintptr_t)stride >= start_chunk_stride_num) {
chunk_card_start = (jbyte*)(start_card +
(stride - start_chunk_stride_num) *
ParGCCardsPerStrideChunk);
} else {
// Go ahead to the next chunk group boundary, then to the requested stride.
chunk_card_start = (jbyte*)(start_card +
(n_strides - start_chunk_stride_num + stride) *
ParGCCardsPerStrideChunk);
}
while (chunk_card_start < end_card) {
// Even though we go from lower to higher addresses below, the
// strided parallelism can interleave the actual processing of the
// dirty pages in various ways. For a specific chunk within this
// stride, we take care to avoid double scanning or missing a card
// by suitably initializing the "min_done" field in process_chunk_boundaries()
// below, together with the dirty region extension accomplished in
// DirtyCardToOopClosure::do_MemRegion().
jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
// Invariant: chunk_mr should be fully contained within the "used" region.
MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
chunk_card_end >= end_card ?
used.end() : addr_for(chunk_card_end));
assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
cl->gen_boundary());
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
// Process the chunk.
process_chunk_boundaries(sp,
dcto_cl,
chunk_mr,
used,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
// We want the LNC array updates above in process_chunk_boundaries
// to be visible before any of the card table value changes as a
// result of the dirty card iteration below.
OrderAccess::storestore();
// We do not call the non_clean_card_iterate_serial() version because
// we want to clear the cards: clear_cl here does the work of finding
// contiguous dirty ranges of cards to process and clear.
clear_cl.do_MemRegion(chunk_mr);
// Find the next chunk of the stride.
chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
}
}
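The OrderAccess::storestore() above orders two groups of plain stores: the lowest_non_clean updates made inside process_chunk_boundaries must become visible before any card values change under clear_cl.do_MemRegion, so a worker that observes an updated card also observes the LNC entry it depends on. In portable C++11 the same publish-before-flag pattern looks roughly like the sketch below (an analogy with illustrative names; HotSpot uses its own OrderAccess primitives, not std::atomic):

#include <atomic>

unsigned char* lowest_non_clean_entry = nullptr;  // the "payload", stored first
std::atomic<signed char> card{0};                 // the "flag", stored second

void publish(unsigned char* lnc, signed char new_card_val) {
  lowest_non_clean_entry = lnc;
  // Comparable in spirit to OrderAccess::storestore(): stores above the
  // fence cannot be reordered past stores below it. (A reader would pair
  // this with an acquire fence before touching the payload.)
  std::atomic_thread_fence(std::memory_order_release);
  card.store(new_card_val, std::memory_order_relaxed);
}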