This article collects typical usage examples of the C++ MemRegion class. If you are wondering how the C++ MemRegion class is used in practice, the curated class examples here may help.
The following shows 15 code examples of the MemRegion class, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
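Before reading the examples, it helps to keep the shape of the MemRegion API in mind: the HotSpot examples below rely on start(), end(), last(), word_size(), is_empty(), contains() and intersection(). The class below is a simplified, self-contained stand-in written for illustration only; it is not HotSpot's actual memRegion.hpp (which works on HeapWord* rather than char*).

#include <algorithm>
#include <cstddef>

// Simplified illustration of the MemRegion interface used in the examples.
class MemRegionSketch {
  char*  _start;
  size_t _word_size;   // size in "words" (here simply bytes, to stay standalone)
public:
  MemRegionSketch() : _start(nullptr), _word_size(0) {}
  MemRegionSketch(char* start, char* end) : _start(start), _word_size(end - start) {}

  char*  start()     const { return _start; }
  char*  end()       const { return _start + _word_size; }
  size_t word_size() const { return _word_size; }
  bool   is_empty()  const { return _word_size == 0; }
  bool   contains(const void* addr) const {
    return addr >= start() && addr < end();
  }
  // Overlapping part of the two regions (possibly empty).
  MemRegionSketch intersection(const MemRegionSketch& other) const {
    char* lo = std::max(start(), other.start());
    char* hi = std::min(end(), other.end());
    return lo < hi ? MemRegionSketch(lo, hi) : MemRegionSketch();
  }
};

Examples 1, 7 and 8 below use exactly these accessors; the real class adds a few more (last(), byte_size(), minus(), and so on).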
Example 1:
void G1BlockOffsetArray::set_region(MemRegion mr) {
_bottom = mr.start();
_end = mr.end();
}
Example 2: assert
// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right-end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
// Some collectors need to do special things whenever their dirty
// cards are processed. For instance, CMS must remember mutator updates
// (i.e. dirty cards) so as to re-scan mutated objects.
// Such work can be piggy-backed here on dirty card scanning, so as to make
// it slightly more efficient than doing a complete non-destructive pre-scan
// of the card table.
MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
if (pCl != NULL) {
pCl->do_MemRegion(mr);
}
HeapWord* bottom = mr.start();
HeapWord* last = mr.last();
HeapWord* top = mr.end();
HeapWord* bottom_obj;
HeapWord* top_obj;
assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
_precision == CardTableModRefBS::Precise,
"Only ones we deal with for now.");
assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
_cl->idempotent() || _last_bottom == NULL ||
top <= _last_bottom,
"Not decreasing");
NOT_PRODUCT(_last_bottom = mr.start());
bottom_obj = _sp->block_start(bottom);
top_obj = _sp->block_start(last);
assert(bottom_obj <= bottom, "just checking");
assert(top_obj <= top, "just checking");
// Given what we think is the top of the memory region and
// the start of the object at the top, get the actual
// value of the top.
top = get_actual_top(top, top_obj);
// If the previous call did some part of this region, don't redo.
if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
_min_done != NULL &&
_min_done < top) {
top = _min_done;
}
// Top may have been reset, and in fact may be below bottom,
// e.g. the dirty card region is entirely in a now free object
// -- something that could happen with a concurrent sweeper.
bottom = MIN2(bottom, top);
MemRegion extended_mr = MemRegion(bottom, top);
assert(bottom <= top &&
(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
_min_done == NULL ||
top <= _min_done),
"overlap!");
// Walk the region if it is not empty; otherwise there is nothing to do.
if (!extended_mr.is_empty()) {
walk_mem_region(extended_mr, bottom_obj, top);
}
// An idempotent closure might be applied in any order, so we don't
// record a _min_done for it.
if (!_cl->idempotent()) {
_min_done = bottom;
} else {
assert(_min_done == _last_explicit_min_done,
"Don't update _min_done for idempotent cl");
}
}
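The interplay of get_actual_top(), _min_done and the MIN2 clamp in Example 2 can be hard to follow. The sketch below is my own standalone distillation of just the clamping arithmetic, not HotSpot code, and it deliberately omits the precision handling and block-start lookups: the right end is capped at min_done when one was recorded, and bottom is then clamped so the resulting region is never inverted.

#include <cassert>
#include <algorithm>

// Hypothetical distillation of the clamping in do_MemRegion(): given the dirty
// region [bottom, top) and the lowest address already processed (min_done, or
// nullptr if none), return the region that still needs scanning.
struct Range { char* start; char* end; bool empty() const { return start >= end; } };

Range trim_against_min_done(char* bottom, char* top, char* min_done) {
  if (min_done != nullptr && min_done < top) {
    top = min_done;                 // don't rescan what a previous call covered
  }
  bottom = std::min(bottom, top);   // top may now be below bottom; clamp
  return Range{bottom, top};
}

int main() {
  char heap[64];
  // A previous call already handled everything from heap+16 upward.
  Range r = trim_against_min_done(heap + 8, heap + 32, heap + 16);
  assert(r.start == heap + 8 && r.end == heap + 16);
  // Everything at or above bottom was already done: the region becomes empty.
  r = trim_against_min_done(heap + 8, heap + 32, heap + 4);
  assert(r.empty());
  return 0;
}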
Example 3: par_clear_large_range
inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
NOT_PRODUCT(region_invariant(mr));
// Range size must be greater than 32 bytes.
_bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
BitMap::large_range);
}
Developer ID: 641252154, Project: HotSpot-JVM-Linux-x86-Research, Lines: 6, Source file: concurrentMarkSweepGeneration.inline.hpp
Example 4: MIN2
// In the numa case eden is not mangled so a survivor space
// moving into a region previously occupied by a survivor
// may find an unmangled region. Also in the PS case eden
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces). Those gaps must be mangled.
void PSYoungGen::mangle_survivors(MutableSpace* s1,
MemRegion s1MR,
MutableSpace* s2,
MemRegion s2MR) {
// Check eden and gap between eden and from-space, in deciding
// what to mangle in from-space. Check the gap between from-space
// and to-space when deciding what to mangle.
//
// +--------+ +----+ +---+
// | eden | |s1 | |s2 |
// +--------+ +----+ +---+
// +-------+ +-----+
// |s1MR | |s2MR |
// +-------+ +-----+
// All of survivor-space is properly mangled so find the
// upper bound on the mangling for any portion above current s1.
HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
MemRegion delta1_left;
if (s1MR.start() < delta_end) {
delta1_left = MemRegion(s1MR.start(), delta_end);
s1->mangle_region(delta1_left);
}
// Find any portion to the right of the current s1.
HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
MemRegion delta1_right;
if (delta_start < s1MR.end()) {
delta1_right = MemRegion(delta_start, s1MR.end());
s1->mangle_region(delta1_right);
}
// Similarly for the second survivor space except that
// any of the new region that overlaps with the current
// region of the first survivor space has already been
// mangled.
delta_end = MIN2(s2->bottom(), s2MR.end());
delta_start = MAX2(s2MR.start(), s1->end());
MemRegion delta2_left;
if (s2MR.start() < delta_end) {
delta2_left = MemRegion(s2MR.start(), delta_end);
s2->mangle_region(delta2_left);
}
delta_start = MAX2(s2->end(), s2MR.start());
MemRegion delta2_right;
if (delta_start < s2MR.end()) {
delta2_right = MemRegion(delta_start, s2MR.end());
s2->mangle_region(delta2_right);
}
if (TraceZapUnusedHeapArea) {
// s1
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
delta1_left.start(), delta1_left.end(), delta1_right.start(),
delta1_right.end());
// s2
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
delta2_left.start(), delta2_left.end(), delta2_right.start(),
delta2_right.end());
}
}
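The "mangle only what is not already mangled" computation in Example 4 is applied once per survivor space. The helper below is my own illustrative distillation, not HotSpot code: it uses plain char* ranges instead of MutableSpace and MemRegion, and it omits the interaction between the two survivor spaces (the MAX2(s2MR.start(), s1->end()) adjustment above).

#include <cassert>
#include <algorithm>
#include <utility>
#include <vector>

// Hypothetical distillation: the current extent [cur_lo, cur_hi) is already
// mangled; return the (at most two) pieces of the new extent [new_lo, new_hi)
// that still need mangling -- one left of cur_lo, one right of cur_hi.
std::vector<std::pair<char*, char*>>
unmangled_deltas(char* cur_lo, char* cur_hi, char* new_lo, char* new_hi) {
  std::vector<std::pair<char*, char*>> out;
  char* left_end = std::min(cur_lo, new_hi);       // delta_end   = MIN2(bottom, newMR.end())
  if (new_lo < left_end)    out.push_back({new_lo, left_end});
  char* right_begin = std::max(cur_hi, new_lo);    // delta_start = MAX2(end, newMR.start())
  if (right_begin < new_hi) out.push_back({right_begin, new_hi});
  return out;
}

int main() {
  char heap[100];
  // The new region extends the current one on both sides: two deltas come back.
  auto d = unmangled_deltas(heap + 20, heap + 40, heap + 10, heap + 60);
  assert(d.size() == 2);
  assert(d[0].first == heap + 10 && d[0].second == heap + 20);
  assert(d[1].first == heap + 40 && d[1].second == heap + 60);
  return 0;
}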
Example 5: pretouch_pages
void MutableSpace::pretouch_pages(MemRegion mr) {
for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
char t = *p; *p = t;
}
}
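Example 5 touches each page of the region by reading a byte and writing it back, which forces the operating system to commit the backing pages before the collector relies on them. A minimal standalone version of the same idea, assuming a POSIX system for the page-size query, might look like this:

#include <unistd.h>   // sysconf (POSIX assumption)

// Touch every page in [start, end) so the kernel commits physical pages now
// rather than on first use. The volatile pointer keeps the compiler from
// optimizing the read/write-back away.
void pretouch(void* start, void* end) {
  const long page = sysconf(_SC_PAGESIZE);
  for (volatile char* p = static_cast<volatile char*>(start);
       p < static_cast<volatile char*>(end);
       p += page) {
    char t = *p;   // read...
    *p = t;        // ...and write the same value back
  }
}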
Example 6: MemRegion
MemRegion MemRegion::minus(const MemRegion mr2) const {
// There seem to be 6 cases:
// |this MemRegion|
// |strictly below|
// |overlap beginning|
// |interior|
// |overlap ending|
// |strictly above|
// |completely overlapping|
// We can't deal with an interior case because it would
// produce two disjoint regions as a result.
// We aren't trying to be optimal in the number of tests below,
// but the order is important to distinguish the strictly cases
// from the overlapping cases.
if (mr2.end() <= start()) {
// strictly below
return MemRegion(start(), end());
}
if (mr2.start() <= start() && mr2.end() <= end()) {
// overlap beginning
return MemRegion(mr2.end(), end());
}
if (mr2.start() >= end()) {
// strictly above
return MemRegion(start(), end());
}
if (mr2.start() >= start() && mr2.end() >= end()) {
// overlap ending
return MemRegion(start(), mr2.start());
}
if (mr2.start() <= start() && mr2.end() >= end()) {
// completely overlapping
return MemRegion();
}
if (mr2.start() > start() && mr2.end() < end()) {
// interior
guarantee(false, "MemRegion::minus, but interior");
return MemRegion();
}
ShouldNotReachHere();
return MemRegion();
}
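A quick worked example makes the case analysis in minus() concrete. The snippet below is a hypothetical re-statement over plain index intervals rather than HotSpot's HeapWord*-based MemRegion, with the cases reordered for brevity; like the original, it refuses the interior case because subtracting an interior region would split the result in two.

#include <cassert>
#include <cstddef>

struct Interval { size_t start, end; };

// Hypothetical re-statement of MemRegion::minus over half-open [start, end) intervals.
Interval minus(Interval a, Interval b) {
  if (b.end <= a.start || b.start >= a.end) return a;        // strictly below / above
  if (b.start <= a.start && b.end >= a.end) return {0, 0};   // completely overlapping
  if (b.start <= a.start) return {b.end, a.end};             // overlap beginning
  if (b.end >= a.end)     return {a.start, b.start};         // overlap ending
  assert(false && "interior case would split the region in two");
  return {0, 0};
}

int main() {
  Interval a{10, 50};
  Interval r = minus(a, Interval{0, 20});    // overlap beginning
  assert(r.start == 20 && r.end == 50);
  r = minus(a, Interval{40, 60});            // overlap ending
  assert(r.start == 10 && r.end == 40);
  r = minus(a, Interval{0, 100});            // completely overlapping
  assert(r.start == 0 && r.end == 0);
  return 0;
}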
Example 7: clear
void CardTableRS::clear(MemRegion mr) {
for (int i = 0; i < _ct_bs._cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_ct_bs._covered[i]);
if (mri.byte_size() > 0) clear_MemRegion(mri);
}
}
Example 8: oop_iterate
void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
if (method() != NULL && mr.contains(&_method)) {
blk->do_oop((oop*) &_method);
}
}
Example 9: mangle_region
void ContiguousSpace::mangle_region(MemRegion mr) {
debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
}
Example 10: process_stride
void
CardTableModRefBS::
process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size) {
// We go from higher to lower addresses here; it wouldn't help that much
// because of the strided parallelism pattern used here.
// Find the first card address of the first chunk in the stride that is
// at least "bottom" of the used region.
jbyte* start_card = byte_for(used.start());
jbyte* end_card = byte_after(used.last());
uintptr_t start_chunk = addr_to_chunk_index(used.start());
uintptr_t start_chunk_stride_num = start_chunk % n_strides;
jbyte* chunk_card_start;
if ((uintptr_t)stride >= start_chunk_stride_num) {
chunk_card_start = (jbyte*)(start_card +
(stride - start_chunk_stride_num) *
ParGCCardsPerStrideChunk);
} else {
// Go ahead to the next chunk group boundary, then to the requested stride.
chunk_card_start = (jbyte*)(start_card +
(n_strides - start_chunk_stride_num + stride) *
ParGCCardsPerStrideChunk);
}
while (chunk_card_start < end_card) {
// Even though we go from lower to higher addresses below, the
// strided parallelism can interleave the actual processing of the
// dirty pages in various ways. For a specific chunk within this
// stride, we take care to avoid double scanning or missing a card
// by suitably initializing the "min_done" field in process_chunk_boundaries()
// below, together with the dirty region extension accomplished in
// DirtyCardToOopClosure::do_MemRegion().
jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
// Invariant: chunk_mr should be fully contained within the "used" region.
MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
chunk_card_end >= end_card ?
used.end() : addr_for(chunk_card_end));
assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
cl->gen_boundary());
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
// Process the chunk.
process_chunk_boundaries(sp,
dcto_cl,
chunk_mr,
used,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
// We want the LNC array updates above in process_chunk_boundaries
// to be visible before any of the card table value changes as a
// result of the dirty card iteration below.
OrderAccess::storestore();
// We do not call the non_clean_card_iterate_serial() version because
// we want to clear the cards: clear_cl here does the work of finding
// contiguous dirty ranges of cards to process and clear.
clear_cl.do_MemRegion(chunk_mr);
// Find the next chunk of the stride.
chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
}
}
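The chunk-selection arithmetic at the top of process_stride, which maps a stride number to the first chunk of cards that stride owns, is easier to see with small numbers. The following is a hypothetical standalone model of just that arithmetic: cards_per_chunk and n_strides are made-up parameters, and a plain card index stands in for the jbyte* card pointers used above.

#include <cassert>
#include <cstdint>

// Chunks are dealt out to strides round-robin, so stride s starts at the first
// chunk at or after the start of "used" whose (global chunk index % n_strides) == s.
uint64_t first_card_of_stride(uint64_t start_card,             // first card of "used"
                              uint64_t start_chunk_stride_num, // start_chunk % n_strides
                              uint64_t stride,
                              uint64_t n_strides,
                              uint64_t cards_per_chunk) {
  if (stride >= start_chunk_stride_num) {
    return start_card + (stride - start_chunk_stride_num) * cards_per_chunk;
  }
  // Wrap to the next chunk-group boundary, then step to the requested stride.
  return start_card + (n_strides - start_chunk_stride_num + stride) * cards_per_chunk;
}

int main() {
  // 4 strides, 8 cards per chunk, and the used region starts in a chunk whose
  // global index is congruent to 2 (mod 4).
  assert(first_card_of_stride(100, 2, 2, 4, 8) == 100);  // our own chunk
  assert(first_card_of_stride(100, 2, 3, 4, 8) == 108);  // the next chunk over
  assert(first_card_of_stride(100, 2, 0, 4, 8) == 116);  // wraps to the next group
  assert(first_card_of_stride(100, 2, 1, 4, 8) == 124);
  return 0;
}

After a chunk is processed, the loop in Example 10 advances by cards_per_chunk * n_strides, i.e. straight to this stride's next chunk in the following group.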
Example 11: par_clear_range
inline void CMSBitMap::par_clear_range(MemRegion mr) {
NOT_PRODUCT(region_invariant(mr));
// Range size is usually just 1 bit.
_bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
BitMap::small_range);
}
Developer ID: 641252154, Project: HotSpot-JVM-Linux-x86-Research, Lines: 6, Source file: concurrentMarkSweepGeneration.inline.hpp
Example 12: process_chunk_boundaries
void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size)
{
// We must worry about the chunk boundaries.
// First, set our max_to_do:
HeapWord* max_to_do = NULL;
uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;
if (chunk_mr.end() < used.end()) {
// This is not the last chunk in the used region. What is the last
// object?
HeapWord* last_block = sp->block_start(chunk_mr.end());
assert(last_block <= chunk_mr.end(), "In case this property changes.");
if (last_block == chunk_mr.end()
|| !sp->block_is_obj(last_block)) {
max_to_do = chunk_mr.end();
} else {
// It is an object and starts before the end of the current chunk.
// last_obj_card is the card corresponding to the start of the last object
// in the chunk. Note that the last object may not start in
// the chunk.
jbyte* last_obj_card = byte_for(last_block);
if (!card_may_have_been_dirty(*last_obj_card)) {
// The card containing the head is not dirty. Any marks in
// subsequent cards still in this chunk must have been made
// precisely; we can cap processing at the end.
max_to_do = chunk_mr.end();
} else {
// The last object must be considered dirty, and extends onto the
// following chunk. Look for a dirty card in that chunk that will
// bound our processing.
jbyte* limit_card = NULL;
size_t last_block_size = sp->block_size(last_block);
jbyte* last_card_of_last_obj =
byte_for(last_block + last_block_size - 1);
jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
// This search potentially goes a long distance looking
// for the next card that will be scanned. For example,
// an object that is an array of primitives will not
// have any cards covering regions interior to the array
// that will need to be scanned. The scan can be terminated
// at the last card of the next chunk. That would leave
// limit_card as NULL and would result in "max_to_do"
// being set with the LNC value or with the end
// of the last block.
jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
CardsPerStrideChunk;
assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
== CardsPerStrideChunk, "last card of next chunk may be wrong");
jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
last_card_of_next_chunk);
for (jbyte* cur = first_card_of_next_chunk;
cur <= last_card_to_check; cur++) {
if (card_will_be_scanned(*cur)) {
limit_card = cur; break;
}
}
assert(0 <= cur_chunk_index+1 &&
cur_chunk_index+1 < lowest_non_clean_chunk_size,
"Bounds error.");
// LNC for the next chunk
jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
if (limit_card == NULL) {
limit_card = lnc_card;
}
if (limit_card != NULL) {
if (lnc_card != NULL) {
limit_card = (jbyte*)MIN2((intptr_t)limit_card,
(intptr_t)lnc_card);
}
max_to_do = addr_for(limit_card);
} else {
max_to_do = last_block + last_block_size;
}
}
}
assert(max_to_do != NULL, "OOPS!");
} else {
max_to_do = used.end();
}
// Now we can set the closure we're using so it doesn't go beyond
// max_to_do.
dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
dcto_cl->set_last_bottom(max_to_do);
#endif
// Now we set *our" lowest_non_clean entry.
// Find the object that spans our boundary, if one exists.
// Nothing to do on the first chunk.
//......... (remainder of this function omitted) .........
Example 13: oop_oop_iterate_m
int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
MemRegion mr) {
assert(obj->is_klass(),"must be a klass");
assert(klassOop(obj)->klass_part()->oop_is_instance(), "must be instance klass");
instanceKlass* ik = instanceKlass::cast(klassOop(obj));
// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call.
int size = ik->object_size();
ik->iterate_static_fields(blk, mr);
ik->vtable()->oop_oop_iterate_m(blk, mr);
ik->itable()->oop_oop_iterate_m(blk, mr);
oop* adr;
adr = ik->adr_array_klasses();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_methods();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_method_ordering();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_local_interfaces();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_transitive_interfaces();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_fields();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_constants();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_class_loader();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_protection_domain();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_signers();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_source_file_name();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_source_debug_extension();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_inner_classes();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_implementor();
if (mr.contains(adr)) blk->do_oop(adr);
adr = ik->adr_previous_version();
if (mr.contains(adr)) blk->do_oop(adr);
klassKlass::oop_oop_iterate_m(obj, blk, mr);
if(ik->oop_map_cache() != NULL) ik->oop_map_cache()->oop_iterate(blk, mr);
return size;
}
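The long run of "if (mr.contains(adr)) blk->do_oop(adr);" pairs in Example 13 is the bounded-iteration idiom: each oop slot of the klass is visited only if its address lies inside the memory region being iterated. The sketch below is a standalone model of that idiom, not HotSpot code; Visitor and the int slots are stand-ins for OopClosure and the oop* fields.

#include <cstdio>

struct Visitor {
  int visited = 0;
  void do_slot(int* adr) { ++visited; (void)adr; }
};

// Visit a slot only when its address falls inside the half-open range [lo, hi).
inline void do_slot_if_contained(Visitor& v, int* lo, int* hi, int* adr) {
  if (adr >= lo && adr < hi) v.do_slot(adr);
}

int main() {
  int fields[8] = {0};
  Visitor v;
  // Only the middle half of the object lies in the region being iterated.
  int* lo = fields + 2;
  int* hi = fields + 6;
  for (int i = 0; i < 8; ++i) {
    do_slot_if_contained(v, lo, hi, fields + i);
  }
  std::printf("visited %d of 8 slots\n", v.visited);   // prints 4
  return 0;
}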
Example 14: Q_CHECK_PTR
//------------------------------------------------------------------------------
// Name: read_bytes(edb::address_t address, void *buf, std::size_t len)
// Desc: reads <len> bytes into <buf> starting at <address>
// Note: if the read failed, the part of the buffer that could not be read will
// be filled with 0xff bytes
//------------------------------------------------------------------------------
bool DebuggerCore::read_bytes(edb::address_t address, void *buf, std::size_t len) {
Q_CHECK_PTR(buf);
bool ok = false;
if(attached()) {
if(len == 0) {
return true;
}
memset(buf, 0xff, len);
// might wanna make this more platform specific (e.g. Windows x86 user mode memory <= 0x7FFFFFFF)
const edb::address_t max_address = std::numeric_limits<edb::address_t>::max();
/*
// I think we can safely assume this won't happen as long as
// max_address is the biggest representable number ;)
if(address > max_address || len > max_address) {
return false;
}
*/
edb::address_t cur_address = address;
edb::address_t end_address;
// check for max possible address (and overflow :s)
// took a few hours to find that bug
if(overflows<edb::address_t>(address, len, max_address)) {
end_address = max_address;
} else {
end_address = address + len - 1;
}
len = end_address - address + 1;
const MemoryRegions& regions = edb::v1::memory_regions();
while(cur_address <= end_address) {
bool part_ok = false;
void* cur_dest = reinterpret_cast<quint8 *>(buf) + (cur_address - address);
edb::address_t cur_end;
std::size_t cur_len;
MemRegion mem;
if(regions.find_region(cur_address, mem)) {
bool changed = false;
if(!mem.readable()) {
mem.set_permissions(true, mem.writable(), mem.executable());
changed = true;
}
// special cases: first and last region (with unaligned address or end_address)
if(overflows<edb::address_t>(mem.start, mem.size(), end_address)) {
cur_end = end_address;
} else {
cur_end = mem.start + mem.size() - 1;
}
cur_len = cur_end - cur_address + 1;
SIZE_T bytes_read;
part_ok = ReadProcessMemory(process_handle_, reinterpret_cast<void*>(cur_address), cur_dest, cur_len, &bytes_read);
Q_ASSERT(bytes_read == cur_len);
if(part_ok) {
ok = true;
Q_FOREACH(const Breakpoint::pointer &bp, breakpoints_) {
if((bp->address() + breakpoint_size()) > cur_address && bp->address() <= cur_end) {
// show the original bytes in the buffer..
const QByteArray& bytes = bp->original_bytes();
Q_ASSERT(bytes.size() == breakpoint_size());
size_t offset = qMax(bp->address(), cur_address) - bp->address();
const size_t bp_size = qMin<size_t>(breakpoint_size(), (end_address - bp->address() + 1)) - offset;
const void* bp_src = bytes.data() + offset;
void* bp_dest = reinterpret_cast<quint8 *>(buf) + (bp->address() + offset - address);
memcpy(bp_dest, bp_src, bp_size);
}
}
}
if(changed) {
mem.set_permissions(false, mem.writable(), mem.executable());
}
} else {
// check next possible page
const edb::address_t cur_base = cur_address - (cur_address % page_size());
if(overflows<edb::address_t>(cur_base, page_size(), end_address)) {
cur_end = end_address;
} else {
cur_end = cur_base + page_size() - 1;
//......... (remainder of this function omitted) .........
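The overflow guard in read_bytes, which computes end_address without wrapping past the largest representable address, is worth seeing in isolation. The overflows&lt;T&gt; helper below is my own guess at what edb's utility does, based purely on how it is called above; edb's actual implementation may differ.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>

// Hypothetical version of overflows<T>(base, len, max): true when the range
// [base, base + len - 1] cannot be represented without exceeding max.
template <typename T>
bool overflows(T base, std::size_t len, T max) {
  if (len == 0) return false;
  return len - 1 > static_cast<std::size_t>(max - base);   // wrap-free comparison
}

int main() {
  using addr_t = std::uint32_t;
  const addr_t max_addr = std::numeric_limits<addr_t>::max();

  addr_t end;
  // Normal case: the requested range fits, so end is base + len - 1.
  if (overflows<addr_t>(0x1000, 0x200, max_addr)) end = max_addr;
  else                                            end = 0x1000 + 0x200 - 1;
  assert(end == 0x11FF);

  // Reading 0x100 bytes starting 16 bytes below the top of the address space:
  // the range is clamped to max_addr instead of wrapping around.
  if (overflows<addr_t>(max_addr - 0x0F, 0x100, max_addr)) end = max_addr;
  else                                                     end = (max_addr - 0x0F) + 0x100 - 1;
  assert(end == max_addr);
  return 0;
}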
Example 15: bits_required
inline ParMarkBitMap::idx_t
ParMarkBitMap::bits_required(MemRegion covered_region)
{
return bits_required(covered_region.word_size());
}