本文整理汇总了C++中methodOop::constants方法的典型用法代码示例。如果您正苦于以下问题:C++ methodOop::constants方法的具体用法?C++ methodOop::constants怎么用?C++ methodOop::constants使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类methodOop的用法示例。
在下文中一共展示了methodOop::constants方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: methods_EMCP
// Returns true when old_method and new_method are EMCP ("Equivalent Modulo
// Constant Pool"): same code size, matching stack/locals sizes, and bytecode
// streams that compare equal opcode-by-opcode, with args_same() deciding
// whether the operands (e.g. constant pool indices) are equivalent.
bool MethodComparator::methods_EMCP(methodOop old_method, methodOop new_method) {
  if (old_method->code_size() != new_method->code_size())
    return false;

  // Evaluate the size check once; the original recomputed
  // check_stack_and_locals_size() inside RC_TRACE just to report the
  // diagnosis code.
  int size_diagnosis = check_stack_and_locals_size(old_method, new_method);
  if (size_diagnosis != 0) {
    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00800000, ("Methods %s non-comparable with diagnosis %d",
                          old_method->name()->as_C_string(), size_diagnosis));
    return false;
  }

  // Set up the comparator state consumed by args_same().
  _old_cp = old_method->constants();
  _new_cp = new_method->constants();
  BytecodeStream s_old(old_method);
  BytecodeStream s_new(new_method);
  _s_old = &s_old;
  _s_new = &s_new;
  _switchable_test = false;

  // Walk both streams in lockstep; any opcode or operand mismatch means the
  // methods are not equivalent.
  Bytecodes::Code c_old, c_new;
  while ((c_old = s_old.next()) >= 0) {
    if ((c_new = s_new.next()) < 0 || c_old != c_new)
      return false;
    if (! args_same(c_old, c_new))
      return false;
  }
  return true;
}
示例2: methods_switchable
// Returns true when execution can be switched from old_method to new_method
// at matching bytecode positions.  new_method may contain inserted code
// fragments; each detected fragment is recorded in bci_map so forward jump
// targets can be validated afterwards.
bool MethodComparator::methods_switchable(methodOop old_method, methodOop new_method,
                                          BciMap &bci_map) {
  if (old_method->code_size() > new_method->code_size())
    // Something has definitely been deleted in the new method, compared to the old one.
    return false;

  // BUG FIX: check_stack_and_locals_size() returns a NONZERO diagnosis code
  // when the methods are not comparable (see methods_EMCP, which tests
  // "!= 0" and reports the code via RC_TRACE).  The original guard
  // "if (! check_stack_and_locals_size(...))" was inverted: it rejected
  // exactly the methods whose sizes matched and accepted mismatched ones.
  if (check_stack_and_locals_size(old_method, new_method) != 0)
    return false;

  // Set up the comparator state consumed by args_same().
  _old_cp = old_method->constants();
  _new_cp = new_method->constants();
  BytecodeStream s_old(old_method);
  BytecodeStream s_new(new_method);
  _s_old = &s_old;
  _s_new = &s_new;
  _bci_map = &bci_map;
  _switchable_test = true;
  GrowableArray<int> fwd_jmps(16);
  _fwd_jmps = &fwd_jmps;
  Bytecodes::Code c_old, c_new;

  while ((c_old = s_old.next()) >= 0) {
    if ((c_new = s_new.next()) < 0)
      return false;
    if (! (c_old == c_new && args_same(c_old, c_new))) {
      // Mismatch: scan forward in the new method for the point where the old
      // bytecode resumes, treating the skipped span as an inserted fragment.
      int old_bci = s_old.bci();
      int new_st_bci = s_new.bci();
      bool found_match = false;
      do {
        c_new = s_new.next();
        if (c_new == c_old && args_same(c_old, c_new)) {
          found_match = true;
          break;
        }
      } while (c_new >= 0);
      if (! found_match)
        return false;
      int new_end_bci = s_new.bci();
      // Record how bcis map around the inserted fragment.
      bci_map.store_fragment_location(old_bci, new_st_bci, new_end_bci);
    }
  }

  // Now we can test all forward jumps (pairs of <old dest bci, actual new
  // dest bci> accumulated in _fwd_jmps -- presumably by args_same(); verify
  // against its implementation).
  for (int i = 0; i < fwd_jmps.length() / 2; i++) {
    if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) {
      RC_TRACE(0x00800000,
               ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d",
                fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)),
                fwd_jmps.at(i*2+1)));
      return false;
    }
  }
  return true;
}
示例3: layout_interpreterState
// Lays out an interpreterState for a frame being rebuilt (e.g. during
// deoptimization/unpacking): fills in locals, method, constant pool cache,
// stack and monitor pointers, and links this frame's bcp-advance bookkeeping
// into the interpreted caller, if any.
// NOTE(review): the `current`, `frame_bottom` and `is_top_frame` parameters
// are not used in this body -- presumably consumed by other platforms'
// versions of this function; confirm before removing.
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
frame* caller,
frame* current,
methodOop method,
intptr_t* locals,
intptr_t* stack,
intptr_t* stack_base,
intptr_t* monitor_base,
intptr_t* frame_bottom,
bool is_top_frame) {
istate->set_locals(locals);
istate->set_method(method);
// The state block points at itself so it can be validated later.
istate->set_self_link(istate);
istate->set_prev_link(NULL);
// thread will be set by a hacky repurposing of frame::patch_pc()
// bcp will be set by vframeArrayElement::unpack_on_stack()
istate->set_constants(method->constants()->cache());
// The frame resumes execution rather than starting the method fresh.
istate->set_msg(BytecodeInterpreter::method_resume);
istate->set_bcp_advance(0);
istate->set_oop_temp(NULL);
istate->set_mdx(NULL);
if (caller->is_interpreted_frame()) {
interpreterState prev = caller->get_interpreterState();
prev->set_callee(method);
// The caller's bcp must advance past its invoke bytecode on return:
// invokeinterface is 5 bytes, the other invokes here are 3.
if (*prev->bcp() == Bytecodes::_invokeinterface)
prev->set_bcp_advance(5);
else
prev->set_bcp_advance(3);
}
istate->set_callee(NULL);
istate->set_monitor_base((BasicObjectLock *) monitor_base);
istate->set_stack_base(stack_base);
istate->set_stack(stack);
// Reserve max_stack slots (plus one guard word) below stack_base.
istate->set_stack_limit(stack_base - method->max_stack() - 1);
}
示例4: result_type
// Determines the result type of this static-field bytecode by resolving the
// field's signature symbol from the method's constant pool and mapping it to
// a BasicType.
BasicType Bytecode_static::result_type(methodOop method) const {
  const int cp_index = java_hwrd_at(1);
  symbolOop signature = method->constants()->signature_ref_at(cp_index);
  return FieldType::basic_type(signature);
}
示例5: constant_pool_type
// Maps the tag of the constant-pool entry at `index` in `method`'s constant
// pool to the BasicType a load of that entry produces.  String,
// unresolved-string and klass entries all yield object references.
static BasicType constant_pool_type(methodOop method, int index) {
  const constantTag tag = method->constants()->tag_at(index);
  if (tag.is_int())    return T_INT;
  if (tag.is_float())  return T_FLOAT;
  if (tag.is_long())   return T_LONG;
  if (tag.is_double()) return T_DOUBLE;
  if (tag.is_string() || tag.is_unresolved_string() || tag.is_klass())
    return T_OBJECT;
  ShouldNotReachHere();
  return T_ILLEGAL;
}
示例6: accessor_entry
// Fast-path interpreter entry for accessor methods of the exact shape
//   aload_0; getfield #index; ireturn/areturn
// (asserted below).  Reads the field straight off the receiver without
// building an interpreter frame; any condition the fast path cannot handle
// (pending safepoint, null receiver, unresolved cpCache entry) falls back
// to normal_entry().
// NOTE(review): this example is truncated by the aggregation site -- the
// tail of the non-volatile switch and the final return are not shown.
int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
// The receiver sits at the top of the caller's expression stack; the
// result is written back into the same slot (slot 0) by SET_LOCALS_*.
intptr_t *locals = stack->sp();
// Drop into the slow path if we need a safepoint check
if (SafepointSynchronize::do_call_back()) {
return normal_entry(method, 0, THREAD);
}
// Load the object pointer and drop into the slow path
// if we have a NullPointerException
oop object = LOCALS_OBJECT(0);
if (object == NULL) {
return normal_entry(method, 0, THREAD);
}
// Read the field index from the bytecode, which looks like this:
// 0: aload_0
// 1: getfield
// 2: index
// 3: index
// 4: ireturn/areturn
// NB this is not raw bytecode: index is in machine order
u1 *code = method->code_base();
assert(code[0] == Bytecodes::_aload_0 &&
code[1] == Bytecodes::_getfield &&
(code[4] == Bytecodes::_ireturn ||
code[4] == Bytecodes::_areturn), "should do");
u2 index = Bytes::get_native_u2(&code[2]);
// Get the entry from the constant pool cache, and drop into
// the slow path if it has not been resolved
constantPoolCacheOop cache = method->constants()->cache();
ConstantPoolCacheEntry* entry = cache->entry_at(index);
if (!entry->is_resolved(Bytecodes::_getfield)) {
return normal_entry(method, 0, THREAD);
}
// Get the result and push it onto the stack
switch (entry->flag_state()) {
case ltos:
case dtos:
// long/double results occupy two slots; grow the stack by one word
// before writing the wide value over slot 0.
stack->overflow_check(1, CHECK_0);
stack->alloc(wordSize);
break;
}
// Volatile fields require acquire semantics on the read (the *_acquire
// field accessors); non-volatile fields use the plain accessors below.
if (entry->is_volatile()) {
switch (entry->flag_state()) {
case ctos:
SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0);
break;
case btos:
SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0);
break;
case stos:
SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0);
break;
case itos:
SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0);
break;
case ltos:
SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0);
break;
case ftos:
SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0);
break;
case dtos:
SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0);
break;
case atos:
SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0);
break;
default:
ShouldNotReachHere();
}
}
else {
switch (entry->flag_state()) {
case ctos:
SET_LOCALS_INT(object->char_field(entry->f2()), 0);
break;
case btos:
SET_LOCALS_INT(object->byte_field(entry->f2()), 0);
break;
case stos:
SET_LOCALS_INT(object->short_field(entry->f2()), 0);
break;
case itos:
//......... remainder of the example omitted by the source site .........
示例7: native_entry
// Interpreter entry point for native methods (Zero/C++ interpreter): builds
// an interpreter frame, bumps the invocation counter, locks the receiver or
// class mirror for synchronized methods, resolves the signature handler,
// and marshals the Java locals into an ffi-style argument list for the
// native function.
// NOTE(review): this example is truncated by the aggregation site -- the
// argument-marshalling loop, the actual native call, unlocking and the
// unwind labels are not shown.
int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
// Make sure method is native and not abstract
assert(method->is_native() && !method->is_abstract(), "should be");
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
// Allocate and initialize our frame
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
thread->push_zero_frame(frame);
interpreterState istate = frame->interpreter_state();
intptr_t *locals = istate->locals();
// Update the invocation counter
if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
InvocationCounter *counter = method->invocation_counter();
counter->increment();
if (counter->reached_InvocationLimit()) {
CALL_VM_NOCHECK(
InterpreterRuntime::frequency_counter_overflow(thread, NULL));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
}
}
// Lock if necessary
BasicObjectLock *monitor;
monitor = NULL;
if (method->is_synchronized()) {
// The monitor was placed at stack_base when the frame was built.
monitor = (BasicObjectLock*) istate->stack_base();
oop lockee = monitor->obj();
// Try a thin lock first: CAS the displaced header into the mark word.
markOop disp = lockee->mark()->set_unlocked();
monitor->lock()->set_displaced_header(disp);
if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
// Recursive lock by this thread: record with a NULL displaced header.
monitor->lock()->set_displaced_header(NULL);
}
else {
// Contended: fall back to the runtime's monitorenter.
CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
}
}
}
// Get the signature handler
InterpreterRuntime::SignatureHandler *handler; {
address handlerAddr = method->signature_handler();
if (handlerAddr == NULL) {
CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
if (HAS_PENDING_EXCEPTION)
goto unlock_unwind_and_return;
handlerAddr = method->signature_handler();
assert(handlerAddr != NULL, "eh?");
}
if (handlerAddr == (address) InterpreterRuntime::slow_signature_handler) {
CALL_VM_NOCHECK(handlerAddr =
InterpreterRuntime::slow_signature_handler(thread, method, NULL,NULL));
if (HAS_PENDING_EXCEPTION)
goto unlock_unwind_and_return;
}
handler = \
InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
}
// Get the native function entry point
address function;
function = method->native_function();
assert(function != NULL, "should be set if signature handler is");
// Build the argument list
stack->overflow_check(handler->argument_count() * 2, THREAD);
if (HAS_PENDING_EXCEPTION)
goto unlock_unwind_and_return;
void **arguments;
void *mirror; {
arguments =
(void **) stack->alloc(handler->argument_count() * sizeof(void **));
void **dst = arguments;
// First argument is always the JNIEnv.
void *env = thread->jni_environment();
*(dst++) = &env;
if (method->is_static()) {
// Static natives receive the class mirror as the second argument;
// it is stashed in oop_temp so the GC can find it.
istate->set_oop_temp(
method->constants()->pool_holder()->java_mirror());
mirror = istate->oop_temp_addr();
*(dst++) = &mirror;
}
intptr_t *src = locals;
for (int i = dst - arguments; i < handler->argument_count(); i++) {
ffi_type *type = handler->argument_type(i);
if (type == &ffi_type_pointer) {
// Object references are passed as handles: NULL stays NULL, non-NULL
// locals are pushed and the handle points at the pushed slot.
if (*src) {
stack->push((intptr_t) src);
*(dst++) = stack->sp();
//......... remainder of the example omitted by the source site .........
示例8: deopt_continue_after_entry
// If deoptimization happens, this function returns the point of next bytecode to continue execution.
// `bcp` must point at a bytecode inside `method` that does NOT need to be
// re-executed; the continuation is the interpreter entry for the bytecode
// following it, selected by the call's result type (TOS state) and length.
// For invoke bytecodes in non-top frames, it also seeds the callee parameter
// size into the (possibly uninitialized) constant pool cache entry.
address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
assert(method->contains(bcp), "just checkin'");
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
int bci = method->bci_from(bcp);
int length = -1; // initial value for debugging
// compute continuation length
length = Bytecodes::length_at(bcp);
// compute result type
BasicType type = T_ILLEGAL;
switch (code) {
case Bytecodes::_invokevirtual :
case Bytecodes::_invokespecial :
case Bytecodes::_invokestatic :
case Bytecodes::_invokeinterface: {
Thread *thread = Thread::current();
ResourceMark rm(thread);
methodHandle mh(thread, method);
type = Bytecode_invoke_at(mh, bci)->result_type(thread);
// since the cache entry might not be initialized:
// (NOT needed for the old calling convention)
if (!is_top_frame) {
// Regular invokes carry a native-order u2 cpCache index after the opcode.
int index = Bytes::get_native_u2(bcp+1);
method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
}
break;
}
case Bytecodes::_invokedynamic: {
Thread *thread = Thread::current();
ResourceMark rm(thread);
methodHandle mh(thread, method);
type = Bytecode_invoke_at(mh, bci)->result_type(thread);
// since the cache entry might not be initialized:
// (NOT needed for the old calling convention)
if (!is_top_frame) {
// invokedynamic carries a u4 index resolved through the secondary cache.
int index = Bytes::get_native_u4(bcp+1);
method->constants()->cache()->secondary_entry_at(index)->set_parameter_size(callee_parameters);
}
break;
}
case Bytecodes::_ldc :
case Bytecodes::_ldc_w : // fall through
case Bytecodes::_ldc2_w:
{
// The TOS state after an ldc depends on the type of the loaded constant.
Thread *thread = Thread::current();
ResourceMark rm(thread);
methodHandle mh(thread, method);
type = Bytecode_loadconstant_at(mh, bci)->result_type();
break;
}
default:
type = Bytecodes::result_type(code);
break;
}
// return entry point for computed continuation state & bytecode length
return
is_top_frame
? Interpreter::deopt_entry (as_TosState(type), length)
: Interpreter::return_entry(as_TosState(type), length);
}
示例9: build_repack_buffer
// --- build_repack_buffer ---------------------------------------------------
// Build a IFrame structure to help ASM code repack the 1 compiled frame into
// many interpreter (or C1) frames. Takes in the current thread and a vframe;
// the vframe is pointing and the virtual Java frame needing to be repacked.
// It takes in the callee (which this frame is busy trying to call in its
// inlined code), and an array of IFrames. It returns the updated IFrame
// buffer filled in for this frame.
// NOTE(review): this example is truncated by the aggregation site -- the
// rest of the middle-frame/youngest-frame handling is not shown.
void Deoptimization::build_repack_buffer( JavaThread *thread, frame fr, IFrame *buf, const DebugMap *dm, const DebugScope *ds, intptr_t *jexstk, objectRef *lckstk, bool is_deopt, bool is_c1, bool is_youngest) {
assert( thread->_deopt_buffer->contains((char*)(buf+1)), "over-ran large deopt buffer?" );
int bci=ds->bci();
if(bci==InvocationEntryBci){
// We deoptimized while hanging in prologue code for a synchronized
// method. We got the lock (after all, deopt happens after returning
// from the blocking call). We want to begin execution in the
// interpreter at BCI 0, and after taking the lock.
// Also it is possible to enter the deopt code through the br_s on method
// entry before the first byte code.
bci = 0;
}
const methodOop moop = ds->method().as_methodOop();
if( ds->caller() ) { // Do I have a caller? Am I mid-call?
// Initialize the constant pool entry for caller-parameter size. It
// might be the case that we inlined and compiled a callee, and are busy
// calling it in the compiled code, and get deoptimized with that callee
// in-progress AND we've never executed it in the interpreter - which
// would have filled in the constant pool cache before making the call.
// Fill it in now.
const methodOop caller = ds->caller()->method().as_methodOop();
// The invoke's u2 cpCache index follows the opcode at the caller's bci.
int index = Bytes::get_native_u2(caller->bcp_from(ds->caller()->bci())+1);
ConstantPoolCacheEntry *cpe = caller->constants()->cache()->entry_at(index);
// Since we are setting the constant pool entry here, and another thread
// could be busy resolving here we have a race condition setting the
// flags. Use a CAS to only set the flags if they are currently 0.
intx *flags_adr = (intx*)((intptr_t)cpe + in_bytes(ConstantPoolCacheEntry::flags_offset()));
if( !*flags_adr ) { // Flags currently 0?
// Set the flags, because the interpreter-return-entry points need some
// info from them. Not all fields are set, because it's too complex to
// do it here... and not needed. The cpCacheEntry is left "unresolved"
// such that the next real use of it from the interpreter will be forced
// to do a proper resolve, which will fill in the missing fields.
// Compute new flags needed by the interpreter-return-entry
intx flags =
(moop->size_of_parameters() & 0xFF) |
(1 << ConstantPoolCacheEntry::hotSwapBit) |
(moop->result_type() << ConstantPoolCacheEntry::tosBits);
// CAS 'em in, but only if there is currently a 0 flags
assert0( sizeof(jlong)==sizeof(intx) );
Atomic::cmpxchg((jlong)flags, (jlong*)flags_adr, 0);
// We don't care about the result, because the cache is monomorphic.
// Either our CAS succeeded and jammed the right parameter count, or
// another thread succeeded and jammed in the right parameter count.
}
}
if (TraceDeoptimization) {
BufferedLoggerMark m(NOTAG, Log::M_DEOPT, TraceDeoptimization, true);
m.out("DEOPT REPACK c%d: ", is_c1 ? 1 : 2);
moop->print_short_name(m.stream());
m.out(" @ bci %d %s", bci, ds->caller() ? "called by...": " (oldest frame)" );
}
// If there was a suitable C1 frame, use it.
// Otherwise, use an interpreter frame.
if( 1 ) {
// Build an interpreter-style IFrame. Naked oops abound.
assert0( !objectRef(moop).is_stack() );
buf->_mref = objectRef(moop);
buf->_cpc = moop->constants()->cacheRef();
// Compute monitor list length. If we have coarsened a lock we will end
// up unlocking it and the repack buffer will not need to see it.
uint mons_len = ds->numlocks();
// NOTE(review): mons_len is unsigned, so "mons_len >= 0" is always true;
// the assert cannot catch an underflow from the decrement -- verify intent.
if( ds->is_extra_lock() ) { mons_len--; assert0( mons_len >= 0 ); }
assert0( mons_len < (256*sizeof(buf->_numlck)) );
buf->_numlck = mons_len;
// Set up the return pc for the next frame: the next frame is a younger
// frame which will return to this older frame. All middle frames return
// back into the interpreter, just after a call with proper TOS state.
// Youngest frames always start in vtos state because the uncommon-trap
// blob sets them up that way.
const address bcp = moop->bcp_from(bci);
Bytecodes::Code c = Bytecodes::java_code(Bytecodes::cast(*bcp));
BasicType return_type=T_VOID;
bool handle_popframe = is_youngest && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution();
int bci_bump = 0;
if( !is_youngest ) { // Middle-frame?
bool from_call = (c == Bytecodes::_invokevirtual ||
c==Bytecodes::_invokespecial||
c==Bytecodes::_invokestatic||
c == Bytecodes::_invokeinterface );
assert(from_call,"Middle frame is in the middle of a call");
bci_bump = Bytecodes::length_at(bcp); // But need to know how much it will be bumped for the return address
buf->_bci = bci; // Save bci without bumping it; normal interpreter call returns bump the bci as needed
buf[-1]._retadr = Interpreter::return_entry(vtos, bci_bump);
//......... remainder of the example omitted by the source site .........