This page collects and summarizes typical usage examples of the C++ method methodOop::size_of_parameters. If you are wondering what methodOop::size_of_parameters does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its enclosing class, methodOop.
Five code examples of methodOop::size_of_parameters are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
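For readers unfamiliar with the method itself: size_of_parameters() reports how many argument slots (machine words) a method's parameters occupy, counting the implicit receiver for instance methods, with long and double taking two slots each. The minimal sketch below, which parses a hypothetical JVM method descriptor string rather than the real methodOop internals, illustrates that counting rule:

// Illustrative only (not HotSpot code): count the parameter slots a JVM
// method descriptor occupies -- the quantity size_of_parameters() reports.
// 'is_static' controls whether the implicit receiver adds one slot.
static int count_parameter_slots(const char* descriptor, bool is_static) {
  int slots = is_static ? 0 : 1;                // implicit "this" takes one slot
  const char* p = descriptor + 1;               // skip the opening '('
  while (*p != ')') {
    while (*p == '[') p++;                      // array dimensions add no extra slots
    slots += (*p == 'J' || *p == 'D') ? 2 : 1;  // long/double use two slots
    if (*p == 'L') {                            // object type: skip to the closing ';'
      while (*p != ';') p++;
    }
    p++;
  }
  return slots;
}
// e.g. count_parameter_slots("(IJLjava/lang/String;)V", /*is_static=*/true) == 4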
Example 1: check_stack_and_locals_size
int MethodComparator::check_stack_and_locals_size(methodOop old_method, methodOop new_method) {
  if (old_method->max_stack() != new_method->max_stack()) {
    return 1;
  } else if (old_method->max_locals() != new_method->max_locals()) {
    return 2;
  } else if (old_method->size_of_parameters() != new_method->size_of_parameters()) {
    return 3;
  } else {
    return 0;
  }
}
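Example 1 compares two versions of a method and returns a nonzero code when their frame layouts differ. A hypothetical caller sketch (not part of the original MethodComparator code) shows how such return codes might be turned into diagnostic text:

// Hypothetical caller (illustrative only): map the comparator's return
// codes to a human-readable reason.
static const char* incompatibility_reason(int code) {
  switch (code) {
    case 1:  return "max_stack differs";
    case 2:  return "max_locals differs";
    case 3:  return "size_of_parameters differs";
    default: return "frame layout unchanged";
  }
}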
Example 2: hash
// for hashing into the table
static int hash(methodOop method) {
  // The point here is to try to make something fairly unique
  // out of the fields we can read without grabbing any locks
  // since the method may be locked when we need the hash.
  return (
    method->code_size() ^
    method->max_stack() ^
    method->max_locals() ^
    method->size_of_parameters());
}
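Because the hash XORs a few cheap, lock-free fields, collisions are possible but acceptable for a lookup table. A hedged sketch, assuming a power-of-two table size, of how such a hash might be folded into a bucket index (the helper below is illustrative, not the table's actual code):

// Illustrative helper (not part of the original table code): fold the hash
// into a bucket index for a power-of-two-sized table. Masking keeps the
// index in range without a division.
static int table_index(int hash, int table_size /* power of two */) {
  return hash & (table_size - 1);
}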
Example 3: empty_entry
int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Drop into the slow path if we need a safepoint check
  if (SafepointSynchronize::do_call_back()) {
    return normal_entry(method, 0, THREAD);
  }

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // No deoptimized frames on the stack
  return 0;
}
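In Example 3, the only stack effect of calling an empty method is discarding the caller-pushed parameters, which the Zero interpreter does by moving the stack pointer up by size_of_parameters() words. A toy sketch, using a hypothetical stand-in for ZeroStack, of that pointer arithmetic:

#include <cstdint>

// Toy stand-in (hypothetical) for the Zero interpreter stack: sp points into
// a word array that grows downward, so "popping" n parameter slots simply
// moves sp up by n words, mirroring the set_sp(sp() + size_of_parameters())
// call above.
struct ToyZeroStack {
  intptr_t* sp;
  void pop_parameters(int size_of_parameters) {
    sp += size_of_parameters;   // discard the caller-pushed argument slots
  }
};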
Example 4: native_entry
//......... (part of the code omitted here) .........
      istate->set_oop_temp(*(oop *) result[0]);
    else
      istate->set_oop_temp(NULL);
  }

  // Reset handle block
  thread->active_handles()->clear();

 unlock_unwind_and_return:
  // Unlock if necessary
  if (monitor) {
    BasicLock *lock = monitor->lock();
    markOop header = lock->displaced_header();
    oop rcvr = monitor->obj();
    monitor->set_obj(NULL);

    if (header != NULL) {
      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
        monitor->set_obj(rcvr); {
          HandleMark hm(thread);
          CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
        }
      }
    }
  }

 unwind_and_return:
  // Unwind the current activation
  thread->pop_zero_frame();

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // Push our result
  if (!HAS_PENDING_EXCEPTION) {
    BasicType type = result_type_of(method);
    stack->set_sp(stack->sp() - type2size[type]);

    switch (type) {
    case T_VOID:
      break;

    case T_BOOLEAN:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jboolean *) result != 0, 0);
      break;

    case T_CHAR:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jchar *) result, 0);
      break;

    case T_BYTE:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jbyte *) result, 0);
      break;

    case T_SHORT:
Example 5: build_repack_buffer
// --- build_repack_buffer ---------------------------------------------------
// Build an IFrame structure to help ASM code repack the 1 compiled frame into
// many interpreter (or C1) frames. Takes in the current thread and a vframe;
// the vframe is pointing at the virtual Java frame needing to be repacked.
// It takes in the callee (which this frame is busy trying to call in its
// inlined code), and an array of IFrames. It returns the updated IFrame
// buffer filled in for this frame.
void Deoptimization::build_repack_buffer( JavaThread *thread, frame fr, IFrame *buf, const DebugMap *dm, const DebugScope *ds, intptr_t *jexstk, objectRef *lckstk, bool is_deopt, bool is_c1, bool is_youngest) {
  assert( thread->_deopt_buffer->contains((char*)(buf+1)), "over-ran large deopt buffer?" );

  int bci = ds->bci();
  if( bci == InvocationEntryBci ) {
    // We deoptimized while hanging in prologue code for a synchronized
    // method. We got the lock (after all, deopt happens after returning
    // from the blocking call). We want to begin execution in the
    // interpreter at BCI 0, and after taking the lock.
    // Also it is possible to enter the deopt code through the br_s on method
    // entry before the first byte code.
    bci = 0;
  }

  const methodOop moop = ds->method().as_methodOop();

  if( ds->caller() ) {          // Do I have a caller?  Am I mid-call?
    // Initialize the constant pool entry for caller-parameter size. It
    // might be the case that we inlined and compiled a callee, and are busy
    // calling it in the compiled code, and get deoptimized with that callee
    // in-progress AND we've never executed it in the interpreter - which
    // would have filled in the constant pool cache before making the call.
    // Fill it in now.
    const methodOop caller = ds->caller()->method().as_methodOop();
    int index = Bytes::get_native_u2(caller->bcp_from(ds->caller()->bci())+1);
    ConstantPoolCacheEntry *cpe = caller->constants()->cache()->entry_at(index);

    // Since we are setting the constant pool entry here, and another thread
    // could be busy resolving it, we have a race condition setting the
    // flags. Use a CAS to only set the flags if they are currently 0.
    intx *flags_adr = (intx*)((intptr_t)cpe + in_bytes(ConstantPoolCacheEntry::flags_offset()));
    if( !*flags_adr ) {         // Flags currently 0?
      // Set the flags, because the interpreter-return-entry points need some
      // info from them. Not all fields are set, because it's too complex to
      // do it here... and not needed. The cpCacheEntry is left "unresolved"
      // such that the next real use of it from the interpreter will be forced
      // to do a proper resolve, which will fill in the missing fields.

      // Compute new flags needed by the interpreter-return-entry
      intx flags =
        (moop->size_of_parameters() & 0xFF) |
        (1 << ConstantPoolCacheEntry::hotSwapBit) |
        (moop->result_type() << ConstantPoolCacheEntry::tosBits);

      // CAS 'em in, but only if there is currently a 0 flags
      assert0( sizeof(jlong) == sizeof(intx) );
      Atomic::cmpxchg((jlong)flags, (jlong*)flags_adr, 0);
      // We don't care about the result, because the cache is monomorphic.
      // Either our CAS succeeded and jammed the right parameter count, or
      // another thread succeeded and jammed in the right parameter count.
    }
  }

  if (TraceDeoptimization) {
    BufferedLoggerMark m(NOTAG, Log::M_DEOPT, TraceDeoptimization, true);
    m.out("DEOPT REPACK c%d: ", is_c1 ? 1 : 2);
    moop->print_short_name(m.stream());
    m.out(" @ bci %d %s", bci, ds->caller() ? "called by..." : " (oldest frame)" );
  }

  // If there was a suitable C1 frame, use it.
  // Otherwise, use an interpreter frame.
  if( 1 ) {
    // Build an interpreter-style IFrame. Naked oops abound.
    assert0( !objectRef(moop).is_stack() );
    buf->_mref = objectRef(moop);
    buf->_cpc = moop->constants()->cacheRef();

    // Compute monitor list length. If we have coarsened a lock we will end
    // up unlocking it and the repack buffer will not need to see it.
    uint mons_len = ds->numlocks();
    if( ds->is_extra_lock() ) { mons_len--; assert0( mons_len >= 0 ); }
    assert0( mons_len < (256*sizeof(buf->_numlck)) );
    buf->_numlck = mons_len;

    // Set up the return pc for the next frame: the next frame is a younger
    // frame which will return to this older frame. All middle frames return
    // back into the interpreter, just after a call with proper TOS state.
    // Youngest frames always start in vtos state because the uncommon-trap
    // blob sets them up that way.
    const address bcp = moop->bcp_from(bci);
    Bytecodes::Code c = Bytecodes::java_code(Bytecodes::cast(*bcp));
    BasicType return_type = T_VOID;

    bool handle_popframe = is_youngest && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution();

    int bci_bump = 0;
    if( !is_youngest ) {        // Middle-frame?
      bool from_call = (c == Bytecodes::_invokevirtual ||
                        c == Bytecodes::_invokespecial ||
                        c == Bytecodes::_invokestatic  ||
                        c == Bytecodes::_invokeinterface );
      assert(from_call, "Middle frame is in the middle of a call");
      bci_bump = Bytecodes::length_at(bcp);   // But need to know how much it will be bumped for the return address
      buf->_bci = bci;                        // Save bci without bumping it; normal interpreter call returns bump the bci as needed
      buf[-1]._retadr = Interpreter::return_entry(vtos, bci_bump);

//......... (part of the code omitted here) .........