This article collects typical usage examples of the C++ Compile class, all drawn from the HotSpot C2 (opto) compiler sources. If you have been wondering what the Compile class is for, or how to use it in practice, the curated examples below should help. In total, 15 code examples of the Compile class are shown, ordered by popularity.
Example 1: for_invokedynamic_inline
CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
  ciMethodHandle* method_handle = call_site->get_target();

  // Set the callee to have access to the class and signature in the
  // MethodHandleCompiler.
  method_handle->set_callee(callee);
  method_handle->set_caller(caller);
  method_handle->set_call_profile(profile);

  // Get an adapter for the MethodHandle.
  ciMethod* target_method = method_handle->get_invokedynamic_adapter();
  if (target_method != NULL) {
    Compile *C = Compile::current();
    CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
    if (cg != NULL && cg->is_inline()) {
      // Add a dependence for invalidation of the optimization.
      if (!call_site->is_constant_call_site()) {
        C->dependencies()->assert_call_site_target_value(call_site, method_handle);
      }
      return cg;
    }
  }
  return NULL;
}
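The key idiom in this example is the dependency registration: because the inlining assumed the CallSite's current target, assert_call_site_target_value records that assumption so the compiled code can be deoptimized if the target is changed later. A minimal standalone sketch of that record-and-invalidate pattern (plain C++ with hypothetical names, not the HotSpot Dependencies API):

#include <cstdio>
#include <map>
#include <vector>

// Hypothetical stand-ins for a call site and a compiled method.
struct CallSite     { int id; };
struct CompiledCode { bool valid = true; };

// Registry mapping a call site to the compiled code that inlined
// through its current target (loosely analogous to Dependencies).
class DependencyRegistry {
  std::map<int, std::vector<CompiledCode*>> _deps;
public:
  void assert_target_value(const CallSite& cs, CompiledCode* code) {
    _deps[cs.id].push_back(code);       // record the optimistic assumption
  }
  // Called when a CallSite is retargeted: every piece of code that
  // assumed the old target must be thrown away (deoptimized).
  void on_target_change(const CallSite& cs) {
    for (CompiledCode* code : _deps[cs.id]) code->valid = false;
    _deps.erase(cs.id);
  }
};

int main() {
  DependencyRegistry deps;
  CallSite cs{42};
  CompiledCode inlined_version;
  deps.assert_target_value(cs, &inlined_version);
  deps.on_target_change(cs);            // simulate setTarget() on the CallSite
  std::printf("still valid: %d\n", inlined_version.valid);  // prints 0
}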
Example 2: assert
JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses, parent_parser);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}
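Worth noting is the check-work-recheck shape: failing() is tested once before the expensive parse and once again after it, and the failure path drains any pending exception states before bailing out. A small sketch of that bailout discipline (Context and run_pass are invented for illustration, not HotSpot API):

#include <stack>

// Hypothetical compilation context with a sticky failure flag,
// loosely modeled on Compile::failing().
struct Context {
  bool failed = false;
  std::stack<int> pending_exception_states;
};

int* run_pass(Context& C) {
  if (C.failed) return nullptr;         // cheap early exit before any work
  // ... expensive work that may set C.failed ...
  if (C.failed) {
    // Clean up partial state before bailing, mirroring the
    // pop_exception_state() drain loop in ParseGenerator::generate.
    while (!C.pending_exception_states.empty())
      C.pending_exception_states.pop();
    return nullptr;
  }
  static int result = 0;
  return &result;
}

int main() {
  Context C;
  return run_pass(C) ? 0 : 1;
}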
Example 3: populate_free_list
void IndexSet::populate_free_list() {
  Compile *compile = Compile::current();
  BitBlock *free = (BitBlock*)compile->indexSet_free_block_list();

  char *mem = (char*)arena()->Amalloc_4(sizeof(BitBlock) *
                                        bitblock_alloc_chunk_size + 32);

  // Align the pointer to a 32-byte boundary.
  BitBlock *new_blocks = (BitBlock*)(((uintptr_t)mem + 32) & ~0x001F);

  // Add the new blocks to the free list.
  for (int i = 0; i < bitblock_alloc_chunk_size; i++) {
    new_blocks->set_next(free);
    free = new_blocks;
    new_blocks++;
  }

  compile->set_indexSet_free_block_list(free);

#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    _alloc_new += bitblock_alloc_chunk_size;
  }
#endif
}
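The Amalloc_4 call deliberately over-allocates by 32 bytes so the block array can then be rounded up to a 32-byte boundary with a mask. The same alignment idiom in standalone C++ (illustration only, not the HotSpot arena API):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  const std::size_t   payload = 100;
  const std::uintptr_t align  = 32;    // must be a power of two
  // Over-allocate by the alignment amount so there is always room
  // to round the pointer up to the next 32-byte boundary.
  char* mem = (char*)std::malloc(payload + align);
  char* aligned = (char*)(((std::uintptr_t)mem + align) & ~(align - 1));
  std::printf("raw=%p aligned=%p\n", (void*)mem, (void*)aligned);
  // Note: free() must be given the original pointer, not the aligned
  // one. (The arena in the example above frees in bulk, so IndexSet
  // never needs to remember the raw pointer.)
  std::free(mem);
}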
Example 4: new
inline void* operator new( size_t x ) {
  Compile* compile = Compile::current();
  compile->set_type_last_size(x);            // remember the size for operator delete
  void *temp = compile->type_arena()->Amalloc_D(x);
  compile->set_type_hwm(temp);               // record the arena high-water mark
  return temp;
}
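A class-level operator new like this routes every allocation of the type into the compile-time arena instead of the C heap; recording the size and high-water mark is what later lets operator delete (Example 15) hand the space back. A toy version of the whole pattern, with an invented bump-pointer Arena standing in for type_arena():

#include <cstddef>
#include <cstdio>

// A trivial bump-pointer arena standing in for Compile's type_arena().
class Arena {
  char        _buf[1 << 16];
  std::size_t _hwm = 0;            // high-water mark
  std::size_t _last_size = 0;      // size of the most recent allocation
public:
  void* alloc(std::size_t n) {
    n = (n + 7) & ~std::size_t(7);           // 8-byte align, like Amalloc_D
    void* p = _buf + _hwm;
    _hwm += n;
    _last_size = n;
    return p;
  }
  // Only the most recent allocation can be reclaimed (cf. Example 15).
  void free_last(void* p, std::size_t n) {
    if (_buf + _hwm - n == p) _hwm -= n;     // roll back the bump pointer
  }
  std::size_t last_size() const { return _last_size; }
};

Arena g_type_arena;

struct Type {
  int id;
  // Class-level operator new: all Types live in the arena.
  void* operator new(std::size_t x) { return g_type_arena.alloc(x); }
  void  operator delete(void* p) {
    g_type_arena.free_last(p, g_type_arena.last_size());
  }
};

int main() {
  Type* t = new Type{7};
  std::printf("id=%d\n", t->id);
  delete t;   // reclaims the space only because it was the last allocation
}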
Example 5: print_inlining_late
virtual void print_inlining_late(const char* msg) {
  CallNode* call = call_node();
  Compile* C = Compile::current();
  C->print_inlining_assert_ready();
  C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  C->print_inlining_move_to(this);
  C->print_inlining_update_delayed(this);
}
Example 6: generate
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
  Compile *C = Compile::current();
  C->print_inlining_skip(this);
  C->add_boxing_late_inline(this);
  JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
  return new_jvms;
}
Example 7: kit
JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Compile* C = kit.C;
  PhaseGVN& gvn = kit.gvn();

  if (C->log() != NULL) {
    C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");

  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_type   = TypeOopPtr::make_from_constant(cpcache);  // returns TypeAryPtr of type T_OBJECT
  const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass());
  Node* cpcache_adr   = kit.makecon(cpcache_type);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset);
  // The oops in the constant pool cache are not compressed; load them as raw pointers.
  Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass());
  Node* target_mh_adr = kit.basic_plus_adr(call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, target_type, T_OBJECT);

  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode* call = new (C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);

  // Pass the target MethodHandle as first argument and shift the
  // other arguments.
  call->init_req(0 + TypeFunc::Parms, target_mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  return kit.transfer_exceptions_into_jvms();
}
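Most of the graph-building above boils down to one addressing pattern: basic_plus_adr computes base plus a byte offset, and make_load reads through the resulting address, exactly like a raw field access in C++. A plain-C++ rendering of that pattern (the CallSiteOop layout is invented for illustration):

#include <cstddef>
#include <cstdio>

struct CallSiteOop {            // hypothetical object layout
  void* header;
  void* target;                 // the MethodHandle field we want
};

int main() {
  CallSiteOop cs{nullptr, (void*)0xCAFE};
  char* base = (char*)&cs;
  // Byte offset of the field, analogous to target_offset_in_bytes().
  std::size_t off = offsetof(CallSiteOop, target);
  // basic_plus_adr: base + offset; make_load: dereference it.
  void* target = *(void**)(base + off);
  std::printf("target=%p\n", target);
}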
Example 8: generate
virtual JVMState* generate(JVMState* jvms) {
  Compile *C = Compile::current();
  C->log_inline_id(this);
  C->add_boxing_late_inline(this);
  JVMState* new_jvms = DirectCallGenerator::generate(jvms);
  return new_jvms;
}
Example 9: num_opnds
//------------------------------dump_spec--------------------------------------
// Print any per-operand special info
void MachNode::dump_spec(outputStream *st) const {
  uint cnt = num_opnds();
  for( uint i=0; i<cnt; i++ )
    _opnds[i]->dump_spec(st);
  const TypePtr *t = adr_type();
  if( t ) {
    Compile* C = Compile::current();
    if( C->alias_type(t)->is_volatile() )
      st->print(" Volatile!");
  }
}
Example 10: generate
JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}
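The three paired if/else lines climb from the exit control back to the call: each step peels one expected node kind (CatchProj, then Catch, then Proj), and any mismatch collapses to top() so every later test fails harmlessly. A standalone sketch of that peeling walk (the node kinds are invented stand-ins):

#include <cstdio>

// Hypothetical node kinds mirroring the CatchProj/Catch/Proj/CallJava walk.
enum Kind { CallJava, Proj, Catch, CatchProj, Top };

struct Node {
  Kind  kind;
  Node* in0;      // control input, like n->in(0)
};

static Node g_top{Top, nullptr};

// Peel one expected layer; anything unexpected collapses to Top,
// after which every later test fails, just like using C->top().
Node* peel(Node* n, Kind expect) {
  return (n && n->kind == expect) ? n->in0 : &g_top;
}

int main() {
  Node call{CallJava, nullptr}, proj{Proj, &call},
       cat{Catch, &proj},       cproj{CatchProj, &cat};
  Node* m = &cproj;
  m = peel(m, CatchProj);
  m = peel(m, Catch);
  m = peel(m, Proj);
  std::printf("found call: %d\n", m->kind == CallJava);  // prints 1
}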
Example 11:
IndexSet::BitBlock *IndexSet::alloc_block() {
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    _alloc_total++;
  }
#endif
  Compile *compile = Compile::current();
  BitBlock* free_list = (BitBlock*)compile->indexSet_free_block_list();
  if (free_list == NULL) {
    populate_free_list();
    free_list = (BitBlock*)compile->indexSet_free_block_list();
  }
  BitBlock *block = free_list;
  compile->set_indexSet_free_block_list(block->next());
  block->clear();
  return block;
}
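alloc_block is a textbook intrusive free list: blocks are threaded through their own next field, allocation pops the head, and the list is refilled in bulk when it runs dry. A self-contained version of the same allocator (simplified; the real one carves its chunks out of the compile arena, as Example 3 shows):

#include <cstdio>
#include <cstring>

struct BitBlock {
  unsigned  words[8];
  BitBlock* _next;                       // intrusive free-list link
  BitBlock* next() const { return _next; }
  void set_next(BitBlock* n) { _next = n; }
  void clear() { std::memset(words, 0, sizeof(words)); }
};

static BitBlock* g_free_list = nullptr;

// Carve a chunk of blocks and thread them onto the free list,
// mirroring populate_free_list() above. The chunk is never freed
// individually, matching the arena's bulk-free discipline.
void populate_free_list() {
  BitBlock* chunk = new BitBlock[64];
  for (int i = 0; i < 64; i++) {
    chunk[i].set_next(g_free_list);
    g_free_list = &chunk[i];
  }
}

BitBlock* alloc_block() {
  if (g_free_list == nullptr) populate_free_list();  // lazy refill
  BitBlock* block = g_free_list;         // pop the head
  g_free_list = block->next();
  block->clear();                        // hand out a zeroed block
  return block;
}

int main() {
  BitBlock* b = alloc_block();
  std::printf("allocated %p\n", (void*)b);
}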
Example 12: call_node
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }
//......... the rest of this method is omitted .........
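One detail worth calling out in the logging block: JVMState frames form a linked list through caller(), one frame per inlining level, and the loop emits one log element per frame. A miniature of that chain walk (the struct below is an invented stand-in for JVMState):

#include <cstdio>

// Hypothetical miniature of JVMState: one frame per inlining level,
// linked through caller like the log loop in do_late_inline().
struct JVMState {
  int         bci;      // bytecode index of the call in this frame
  const char* method;
  JVMState*   caller;
};

int main() {
  JVMState root  { -1, "Outer.run",    nullptr };
  JVMState mid   { 17, "Helper.apply", &root   };
  JVMState inner {  3, "Leaf.get",     &mid    };
  // Same shape as: while (p != NULL) { log->elem(...); p = p->caller(); }
  for (JVMState* p = &inner; p != nullptr; p = p->caller)
    std::printf("jvms bci='%d' method='%s'\n", p->bci, p->method);
}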
Example 13: print_inlining_late
virtual void print_inlining_late(const char* msg) {
  CallNode* call = call_node();
  Compile* C = Compile::current();
  C->print_inlining_insert(this);
  C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
}
Example 14: end_idx
//......... the beginning of this method is omitted .........
    Node *m = _nodes[i4];
    if( !ready_cnt[m->_idx] )   // Zero ready count?
      worklist.push(m);         // Then on to worklist!
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(_nodes[0], next_call, bbs);

#ifndef PRODUCT
  if (TraceOptoPipelining) {
    for (uint j=0; j<_nodes.size(); j++) {
      Node *n = _nodes[j];
      int idx = n->_idx;
      tty->print("# ready cnt:%3d ", ready_cnt[idx]);
      tty->print("latency:%3d ", node_latency.at_grow(idx));
      tty->print("%4d: %s\n", idx, n->Name());
    }
  }
#endif

  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

#ifndef PRODUCT
    uint before_size = worklist.size();
    if (TraceOptoPipelining && before_size > 1) {
      tty->print("# before select:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];                  // Get Node on worklist
        tty->print(" %3d", n->_idx);
      }
      tty->print("\n");
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(worklist, bbs, ready_cnt, next_call, phi_cnt, node_latency);
    _nodes.map(phi_cnt++, n);   // Schedule him next
    MachNode *m = n->is_Mach();

#ifndef PRODUCT
    if (TraceOptoPipelining && before_size > 1) {
      tty->print("# select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", node_latency.at_grow(n->_idx));
      n->dump();
      tty->print("# after select:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];                  // Get Node on worklist
        tty->print(" %4d", n->_idx);
      }
      tty->print("\n");
    }
#endif

    if( m ) {
      MachCallNode *mcall = m->is_MachCall();
      if( mcall ) {
        phi_cnt = sched_call(matcher, bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
        continue;
      }
    }

    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5);   // Get user
      if( bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if( !--ready_cnt[m->_idx] )
        worklist.push(m);
    }
  }

  if( phi_cnt != end_idx() ) {
    // did not schedule all. Retry, Bailout, or Die
    Compile* C = matcher.C;
    if (C->subsume_loads() == true) {
      // Retry with subsume_loads == false
      C->set_result(Compile::Comp_subsumed_load_conflict);
    } else {
      // Bailout without retry
      C->set_result(Compile::Comp_no_retry);
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (TraceOptoPipelining) {
    tty->print("# after schedule_local\n");
    for (uint i = 0; i < _nodes.size(); i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->print("\n");
  }
#endif

  return true;
}
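Stripped of the tracing and the call-scheduling special case, this is classic list scheduling: every node carries a ready count of unscheduled inputs, a node joins the worklist when its count reaches zero, and scheduling a node decrements the counts of its users. A minimal self-contained version over a small DAG (select() here is just "pick any ready node"; the real code weighs latency and other heuristics):

#include <cstdio>
#include <vector>

int main() {
  // DAG: users[i] lists the users of node i.
  // Node 2 depends on 0 and 1; node 3 depends on 1 and 2.
  std::vector<std::vector<int>> users = {{2}, {2, 3}, {3}, {}};
  std::vector<int> ready_cnt = {0, 0, 2, 2};   // unscheduled input counts

  std::vector<int> worklist, schedule;
  for (int n = 0; n < (int)ready_cnt.size(); n++)
    if (ready_cnt[n] == 0) worklist.push_back(n);  // initially ready nodes

  while (!worklist.empty()) {
    int n = worklist.back();          // "select": pick any ready node
    worklist.pop_back();
    schedule.push_back(n);            // schedule it next
    for (int m : users[n])
      if (--ready_cnt[m] == 0)        // children are now all ready
        worklist.push_back(m);
  }

  for (int n : schedule) std::printf("%d ", n);  // a valid topological order
  std::printf("\n");
}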
Example 15: delete
inline void operator delete( void* ptr ) {
  Compile* compile = Compile::current();
  // Return the space to the type arena; type_last_size() (recorded by
  // the operator new of Example 4) supplies the allocation's size.
  compile->type_arena()->Afree(ptr, compile->type_last_size());
}
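Taken together with the operator new of Example 4, this gives Type objects a last-in-first-out allocation discipline: Afree can hand space back to the arena only when the pointer is the most recent allocation, and type_last_size() supplies the size without storing a per-object header. The toy Arena sketch after Example 4 shows the same new/delete pairing end to end.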