This article collects typical usage examples of the C++ round_to function. If you have been wondering what round_to does, how to call it, or where to find real-world uses of it, the hand-picked snippets below should help.
The 15 code examples that follow (drawn chiefly from the HotSpot VM sources) are listed in order of popularity.
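Throughout these examples, round_to(x, n) is the HotSpot helper that rounds x up to the nearest multiple of n, where n is a power of two; an already-aligned value comes back unchanged, which is why it also shows up inside alignment asserts. As a quick orientation, here is a minimal, self-contained sketch of the idiom — my own stand-in, not the HotSpot implementation:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Stand-in for HotSpot's round_to: round x up to the next multiple of
// alignment, which must be a power of two.
static intptr_t round_to(intptr_t x, intptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  printf("%ld\n", (long)round_to(13, 8));   // 16
  printf("%ld\n", (long)round_to(16, 8));   // 16 (already aligned)
  printf("%ld\n", (long)round_to(17, 8));   // 24
  return 0;
}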
Example 1: assert
// Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");
  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  //       extra word for the relocation information, containing the reloc
  //       index table length. Unfortunately, the reloc index table imple-
  //       mentation is not easily understandable and thus it is not clear
  //       what exactly the format is supposed to be. For now, we just turn
  //       off the use of this table (gri 7/6/2000).
  _name = name;
  _size = size;
  _frame_complete_offset = frame_complete;
  _header_size = header_size;
  _relocation_size = locs_size;
  _instructions_offset = align_code_offset(header_size + locs_size);
  _data_offset = size;
  _oops_offset = size;
  _oops_length = 0;
  _frame_size = 0;
  set_oop_maps(NULL);
}
Example 2: code_at
int Bytecodes::special_length_at(address bcp) {
  Code code = code_at(bcp);
  switch (code) {
    case _wide:
      return wide_length_for(cast(*(bcp + 1)));

    case _tableswitch:
      { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
        jlong lo = (jint)Bytes::get_Java_u4(aligned_bcp + 1*jintSize);
        jlong hi = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
        jlong len = (aligned_bcp - bcp) + (3 + hi - lo + 1)*jintSize;
        // only return len if it can be represented as a positive int;
        // return -1 otherwise
        return (len > 0 && len == (int)len) ? len : -1;
      }

    case _lookupswitch:      // fall through
    case _fast_binaryswitch: // fall through
    case _fast_linearswitch:
      { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
        jlong npairs = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
        jlong len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize;
        // only return len if it can be represented as a positive int;
        // return -1 otherwise
        return (len > 0 && len == (int)len) ? len : -1;
      }
  }
  return 0;
}
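Note on Example 2: the round_to calls align the address just past the tableswitch/lookupswitch opcode up to the next 4-byte (jint) boundary, because the switch operands are stored 4-byte aligned. A small stand-alone illustration of that address arithmetic (align_up is my own helper, with jintSize assumed to be 4):

#include <cstdint>
#include <cstdio>

// Round p up to the next multiple of n (a power of two), mirroring
// round_to((intptr_t)bcp + 1, jintSize) in the example above.
static intptr_t align_up(intptr_t p, intptr_t n) { return (p + n - 1) & ~(n - 1); }

int main() {
  const intptr_t jintSize = 4;  // assumed size of a Java int in bytes
  for (intptr_t bcp = 100; bcp < 104; bcp++) {
    // The operand block starts at the first 4-byte boundary after the opcode.
    printf("bcp=%ld -> operands at %ld (pad %ld bytes)\n",
           (long)bcp, (long)align_up(bcp + 1, jintSize),
           (long)(align_up(bcp + 1, jintSize) - (bcp + 1)));
  }
  return 0;
}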
Example 3: _name
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _name(name),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _strings(CodeStrings()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _content_begin(layout.content_begin()),
  _type(type)
{
  assert(_size == round_to(_size, oopSize), "unaligned size");
  assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
Example 4: assert0
// --- generate
address MethodStubBlob::generate( heapRef moop, address c2i_adapter ) {
  // NativeMethodStubs must be jumped-to directly and are packed back-to-back.
  // Hence they start CodeEntryAligned, and each later one has to be
  // CodeEntryAligned so we expect the instruction_size to be a multiple.
  assert0( round_to(NativeMethodStub::instruction_size, CodeEntryAlignment) == NativeMethodStub::instruction_size );
  NativeMethodStub *nms;
  do {
    // The _free_list is a racing CAS-managed link-list. Must read the
    // _free_list exactly ONCE before the CAS attempt below, or otherwise know
    // we have something that used to be on the free_list and is not-null. In
    // general, if we re-read the free_list we have to null-check the result.
    nms = _free_list;
    if( !nms ) {
      // CodeCache makes CodeBlobs. Make a CodeBlob typed as a methodCodeStub.
      CodeBlob *cb = CodeCache::malloc_CodeBlob( CodeBlob::methodstub, 256*NativeMethodStub::instruction_size );
      address adr = (address)round_to((intptr_t)cb->code_begins(), CodeEntryAlignment);
      cb->_code_start_offset = adr - (address)cb->_code_begins;
      while( adr + NativeMethodStub::instruction_size < cb->end() ) {
        free_stub((NativeMethodStub*)adr);
        adr += NativeMethodStub::instruction_size;
      }
      // The last not-null thing jammed on the freelist.
      nms = (NativeMethodStub*)(adr - NativeMethodStub::instruction_size);
    }
  } while( Atomic::cmpxchg_ptr(*(NativeMethodStub**)nms, &_free_list, nms) != nms );
  nms->fill( moop, c2i_adapter );
  return (address)nms;
}
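Note on Example 4: _free_list is treated as a lock-free (Treiber-style) stack of fixed-size stubs, popped with a compare-and-swap as the comments describe. The sketch below shows that push/pop CAS pattern using std::atomic rather than HotSpot's Atomic::cmpxchg_ptr, and it ignores the ABA problem that a production free list has to handle; the Stub type and the pool are made up for the illustration.

#include <atomic>
#include <cstdio>

// Simplified lock-free free list: nodes are pushed and popped by CAS-ing
// the list head, in the spirit of the example above.
struct Stub { Stub* next; int id; };

static std::atomic<Stub*> _free_list{nullptr};

static void free_stub(Stub* s) {
  Stub* head = _free_list.load();
  do {
    s->next = head;                                   // link onto current head
  } while (!_free_list.compare_exchange_weak(head, s));
}

static Stub* alloc_stub() {
  Stub* head = _free_list.load();
  while (head != nullptr &&
         !_free_list.compare_exchange_weak(head, head->next)) {
    // On failure, head is refreshed with the current list head; retry.
  }
  return head;  // nullptr if the list was empty
}

int main() {
  static Stub pool[4];
  for (int i = 0; i < 4; i++) { pool[i].id = i; free_stub(&pool[i]); }
  for (Stub* s = alloc_stub(); s != nullptr; s = alloc_stub()) {
    printf("popped stub %d\n", s->id);                // LIFO order: 3, 2, 1, 0
  }
  return 0;
}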
Example 5: size_activation_helper
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size weren't a multiple of what the stack
  // needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
    round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  // callee_locals and max_stack are counts, not the size in frame.
  const int locals_size =
    round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords;
  return (round_to((max_stack_words
                    + rounded_vm_local_words
                    + frame::memory_parameter_word_sp_offset), WordsPerLong)
          // already rounded
          + locals_size + monitor_size);
}
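Note on Example 5: each sub-area of the frame is rounded up to a multiple of WordsPerLong (two words) so that the resulting frame keeps the stack 8-byte aligned. The toy program below redoes the same arithmetic with made-up constants — the real values are platform-specific HotSpot internals — just to show how the pieces add up:

#include <cstdio>

// Same rounding idiom as round_to in the example above.
static int round_to(int x, int n) { return (x + n - 1) & ~(n - 1); }

int main() {
  // Assumed, illustrative constants -- not the real HotSpot values.
  const int WordsPerLong = 2;                      // words per 8-byte long
  const int stackElementWords = 1;                 // words per stack slot
  const int vm_local_words = 7;                    // interpreter VM locals
  const int memory_parameter_word_sp_offset = 23;  // fixed frame overhead

  int callee_extra_locals = 3, max_stack = 10, monitor_size = 4;

  int rounded_vm_local_words = round_to(vm_local_words, WordsPerLong);               // 8
  int locals_size = round_to(callee_extra_locals * stackElementWords, WordsPerLong); // 4
  int max_stack_words = max_stack * stackElementWords;                               // 10

  int frame_words = round_to(max_stack_words + rounded_vm_local_words +
                             memory_parameter_word_sp_offset, WordsPerLong)          // 42
                    + locals_size + monitor_size;                                    // 50
  printf("interpreter frame size: %d words\n", frame_words);
  return 0;
}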
Example 6: assert
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copies the code and relocation info.
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int header_size,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps
) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name = name;
  _size = size;
  _frame_complete_offset = frame_complete;
  _header_size = header_size;
  _relocation_size = round_to(cb->total_relocation_size(), oopSize);
  _content_offset = align_code_offset(header_size + _relocation_size);
  _code_offset = _content_offset + cb->total_offset_of(cb->insts());
  _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
  assert(_data_offset <= size, "codeBlob is too small");

  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
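Note on Example 6: the constructor lays the blob out as header | relocation info | content (code) | data, rounding each boundary so every region starts oop-aligned and the content is additionally code-aligned. The sketch below walks the same offset arithmetic with assumed sizes; align_code_offset is approximated here by rounding to a 32-byte boundary, which is only an illustration, not HotSpot's actual rule:

#include <cstdio>

static int round_to(int x, int n) { return (x + n - 1) & ~(n - 1); }

int main() {
  const int oopSize = 8;          // assumed: 64-bit oops
  const int code_alignment = 32;  // assumed stand-in for align_code_offset()

  int header_size = 88;           // assumed header size, already oop-aligned
  int reloc_bytes = 30;           // raw relocation bytes from the CodeBuffer
  int content_bytes = 500;        // raw code/content bytes from the CodeBuffer

  int relocation_size = round_to(reloc_bytes, oopSize);                            // 32
  int content_offset  = round_to(header_size + relocation_size, code_alignment);   // 128
  int data_offset     = content_offset + round_to(content_bytes, oopSize);         // 632

  printf("header     [0, %d)\n", header_size);
  printf("relocation [%d, %d)\n", header_size, header_size + relocation_size);
  printf("content    [%d, %d)\n", content_offset, data_offset);
  return 0;
}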
Example 7: lgrp_spaces
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                   touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If object header crossed a small page boundary we mark the area
              // as invalid rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }
            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}
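Note on Example 7: crossing_start and crossing_end differ when a small-page boundary falls within the filled range, so comparing the two round_to results is a cheap page-crossing test. A self-contained illustration of that comparison, with a 4096-byte page size assumed:

#include <cstdint>
#include <cstdio>

static intptr_t round_to(intptr_t x, intptr_t n) { return (x + n - 1) & ~(n - 1); }

// True when [start, start + len) touches a 'page_size'-byte page boundary,
// using the same round-up comparison as the example above.
static bool crosses_page(intptr_t start, intptr_t len, intptr_t page_size) {
  return round_to(start, page_size) != round_to(start + len, page_size);
}

int main() {
  const intptr_t page = 4096;  // assumed small page size
  printf("%d\n", crosses_page(4000, 50, page));   // 0: stays within one page
  printf("%d\n", crosses_page(4090, 50, page));   // 1: spans the 4096 boundary
  return 0;
}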
Example 8: round_to
int RelocIterator::locs_and_index_size(int code_size, int locs_size) {
  if (!UseRelocIndex)  return locs_size;   // no index
  code_size = round_to(code_size, oopSize);
  locs_size = round_to(locs_size, oopSize);
  int index_size = num_cards(code_size) * sizeof(RelocIndexEntry);
  // format of indexed relocs:
  //   relocation_begin:   relocInfo ...
  //   index:              (addr, reloc#) ...
  //   indexSize           :relocation_end
  return locs_size + index_size + BytesPerInt;
}
Example 9: lgrp_spaces
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        size_t area_touched_words = pointer_delta(s->end(), s->top());
        CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If object header crossed a small page boundary we mark the area
            // as invalid rounding it to a page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }
          ls->add_invalid_region(invalid);
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
Example 10: GPGC_unlink
// --- unlink ----------------------------------------------------------------
// GPGC unlink any MSB's whose method has died.
void MethodStubBlob::GPGC_unlink( address from, address to ) {
  address adr = (address)round_to((intptr_t)from, CodeEntryAlignment);
  for( ; adr+NativeMethodStub::instruction_size < to; adr += NativeMethodStub::instruction_size ) {
    NativeMethodStub *nms = (NativeMethodStub*)adr;
    heapRef ref = nms->get_oop();
    if( ref.not_null() ) {
      assert(ref.is_old(), "CodeCache should only have old-space oops");
      if ( GPGC_ReadTrapArray::is_remap_trapped(ref) ) {
        assert0(GPGC_ReadTrapArray::is_old_gc_remap_trapped(ref));
        ref = GPGC_Collector::get_forwarded_object(ref);
      }
      assert(ref.as_oop()->is_oop(), "not oop");
      if ( ! GPGC_Marks::is_old_marked_strong_live(ref) ) {
        free_stub(nms);
      } else {
        // Any NativeMethodStub we don't free, we instead must mark through the objectRef to
        // get consistent NMT bits and remapped addresses.
        GPGC_OldCollector::mark_to_live(nms->oop_addr());
      }
    }
  }
}
Example 11: new
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
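Note on Example 11: each stub is rounded up to a whole word and then bump-allocated out of a larger chunk, so allocation metadata is paid once per chunk rather than once per stub. A simplified, stand-alone sketch of that bump-pointer pattern (the chunk is a plain static array here, not a CodeCache BufferBlob):

#include <cstddef>
#include <cstdio>

static size_t round_to(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

static const size_t kWordSize  = sizeof(void*);
static const size_t kChunkSize = 1024;            // assumed chunk size
static unsigned char _chunk_mem[kChunkSize];
static unsigned char* _chunk     = _chunk_mem;    // bump pointer
static unsigned char* _chunk_end = _chunk_mem + kChunkSize;

// Hand out 'payload' bytes rounded up to a whole word, or NULL when the
// current chunk is exhausted (a real allocator would grab a new chunk).
static void* bump_alloc(size_t payload) {
  size_t real_size = round_to(payload, kWordSize);
  if (_chunk + real_size > _chunk_end) return NULL;
  void* res = _chunk;
  _chunk += real_size;
  return res;
}

int main() {
  void* a = bump_alloc(13);   // occupies 16 bytes on a 64-bit host
  void* b = bump_alloc(32);   // occupies 32 bytes
  printf("a=%p b=%p gap=%td bytes\n", a, b, (char*)b - (char*)a);
  return 0;
}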
Example 12: assert_byte_count_ok
static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
#ifdef ASSERT
  if ((size_t)round_to(byte_count, unit_size) != byte_count) {
    basic_fatal("byte count must be aligned");
  }
#endif
}
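Note on Example 12: round_to leaves an already-aligned value unchanged, so comparing round_to(byte_count, unit_size) with byte_count is an alignment check. The same test can be written with a modulo, as this small equivalence check shows (illustration only, assuming the unit size is a power of two):

#include <cassert>
#include <cstddef>

static size_t round_to(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

int main() {
  for (size_t byte_count = 0; byte_count < 64; byte_count++) {
    bool aligned_by_round = (round_to(byte_count, 8) == byte_count);
    bool aligned_by_mod   = (byte_count % 8 == 0);
    assert(aligned_by_round == aligned_by_mod);  // the two checks agree
  }
  return 0;
}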
Example 13: round_to
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
             (callee_locals - callee_params)*Interpreter::stackElementWords +
             monitors * frame::interpreter_frame_monitor_size() +
             temps * Interpreter::stackElementWords + extra_args;

  // On AArch64 we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = round_to(size, 2);

  return size;
}
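Note on Example 13: the frame size is counted in 8-byte words on AArch64, so rounding the word count up to a multiple of 2 is what keeps the stack pointer 16-byte aligned, as the comment says. A short illustration (an 8-byte word size is assumed):

#include <cstdio>

static int round_to(int x, int n) { return (x + n - 1) & ~(n - 1); }

int main() {
  const int word_bytes = 8;  // assumed AArch64 word size
  for (int words = 12; words <= 15; words++) {
    int rounded = round_to(words, 2);
    printf("%d words -> %d words = %d bytes (multiple of 16)\n",
           words, rounded, rounded * word_bytes);
  }
  return 0;
}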
Example 14: new
void* VtableStub::operator new(size_t size, int code_size) {
  assert(size == sizeof(VtableStub), "mismatched size");
  num_vtable_chunks++;
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
    }
    _chunk = blob->instructions_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    // Notify JVMTI about this stub. The event will be recorded by the enclosing
    // JvmtiDynamicCodeEventCollector and posted when this thread has released
    // all locks.
    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
    }
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
Example 15: round_to
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int tempcount,
                                         int extra_args,
                                         int moncount,
                                         int callee_param_count,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_fixed_frame.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

  // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
  // Since the callee parameters already account for the callee's params we only need to account for
  // the extra locals.
  int size = overhead +
             ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
             (moncount*frame::interpreter_frame_monitor_size()) +
             tempcount*Interpreter::stackElementWords + extra_args;

#ifdef AARCH64
  size = round_to(size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64

  return size;
}