This page collects typical usage examples of the C++ CompileLog class. If you are wondering what exactly CompileLog is used for, how to use it, or where to find working examples, the hand-picked class examples here may help.
Below you will find 13 code examples of the CompileLog class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
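All 13 examples share the same basic idiom: obtain the CompileLog from the current compilation, check it for NULL (the log only exists when the VM runs with -XX:+LogCompilation), and then emit XML elements into it. Here is a minimal sketch of that idiom; do_some_pass() and its arguments are placeholder names, not HotSpot code:

// Minimal sketch of the idiom used throughout the examples below; not a
// complete program. CompileLog is HotSpot's compilation-log class, and
// C->log() returns NULL unless -XX:+LogCompilation is enabled.
void do_some_pass(Compile* C, ciMethod* m) {
  CompileLog* log = C->log();          // may be NULL
  if (log != NULL) {
    // elem() writes one self-closing XML element; identify() interns the
    // metadata object and returns a small integer id for the log.
    log->elem("my_pass method='%d'", log->identify(m));
  }
  // ... the pass itself runs whether or not logging is active ...
}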
Example 1: in
//------------------------------Ideal------------------------------------------
// Check for the case of comparing an unknown klass loaded from the primary
// super-type array vs a known klass with no subtypes. This amounts to
// checking to see whether an unknown klass subtypes a known klass with no subtypes;
// this only happens on an exact match. We can shorten this test by 1 load.
Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
// Constant pointer on right?
const Type *t2 = phase->type(in(2));
if( t2 == TypePtr::NULL_PTR || !t2->singleton() || t2 == Type::TOP )
return NULL;
// Now check for LoadKlass on left.
Node *ldk1 = in(1);
if( ldk1->Opcode() != Op_LoadKlass )
return NULL;
// Check for loading from primary supertype array.
// Any nested loadklass from a loadklass+con must be from the primary supertype (p.s.) array.
Node *adr1 = ldk1->in(MemNode::Address);
if( adr1->Opcode() != Op_AddP )
return NULL;
Node *ldk2 = adr1->in(AddPNode::Address);
Node *off2 = adr1->in(AddPNode::Offset);
if( ldk2->Opcode() != Op_LoadKlass )
return NULL;
jint con2;
if( !off2->get_int(&con2) )
return NULL;
// Get the constant klass we are comparing to.
ciType *superklass = t2->is_klassptr()->klass();
// Verify that we understand the situation
if( ((ciKlass*)superklass)->super_check_offset() != (juint)con2 )
return NULL; // Might be element-klass loading from array klass
// If 'superklass' has no subklasses and is not an interface, then we are
// assured that the only input which will pass the type check is
// 'superklass' itself.
//
// We could be more liberal here, and allow the optimization on interfaces
// which have a single implementor. This would require us to increase the
// expressiveness of the add_dependency() mechanism.
// For object arrays, the base element type itself must have no subtypes.
while( superklass->is_obj_array_klass() )
superklass = superklass->as_obj_array_klass()->base_element_type();
if( superklass->is_instance_klass() ) {
ciInstanceKlass* ik = superklass->as_instance_klass();
if( ik->has_subklass() || ik->flags().is_interface() ) return NULL;
// Add a dependency if there is a chance that a subclass will be added later.
if( !ik->flags().is_final()) {
CompileLog* log = phase->C->log();
if (log != NULL){
log->elem("cast_up reason='!has_subklass' from='%d' to='(exact)'",
log->identify(ik));
}
phase->C->recorder()->add_dependent(ik, NULL);
}
}
// Bypass the dependent load, and compare directly
this->set_req(1,ldk2);
return this;
}
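With -XX:+LogCompilation enabled, the elem() call above contributes a single self-closing element to the compilation log, along these lines (the numeric id is whatever log->identify(ik) assigned; the value shown is illustrative):

<cast_up reason='!has_subklass' from='527' to='(exact)'/>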
Example 2: CE_Eliminator
CE_Eliminator(IR* hir) : _cee_count(0), _ifop_count(0), _hir(hir) {
_has_substitution = false;
_hir->iterate_preorder(this);
if (_has_substitution) {
// substituted some ifops/phis, so resolve the substitution
SubstitutionResolver sr(_hir);
}
CompileLog* log = _hir->compilation()->log();
if (log != NULL)
log->set_context("optimize name='cee'");
}
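set_context() tags all subsequent log output with the given marker until Example 4's destructor calls clear_context(). Purely as an illustration (this guard is not part of HotSpot), the pair could be wrapped in an RAII helper so the context cannot leak when a pass exits early:

// Hypothetical RAII guard pairing set_context() (Example 2) with
// clear_context() (Example 4); a sketch, not HotSpot code.
class LogContextMark {
  CompileLog* _log;
 public:
  LogContextMark(CompileLog* log, const char* context) : _log(log) {
    if (_log != NULL) _log->set_context(context);
  }
  ~LogContextMark() {
    if (_log != NULL) _log->clear_context(); // drops the marker if nothing was printed
  }
};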
Example 3: peek
//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {
// Shorthand access to array store elements without popping them.
Node *obj = peek(0);
Node *idx = peek(1);
Node *ary = peek(2);
if (_gvn.type(obj) == TypePtr::NULL_PTR) {
// There's never a type check on null values.
// This cutout lets us avoid the uncommon_trap(Reason_array_check)
// below, which turns into a performance liability if the
// gen_checkcast folds up completely.
return;
}
// Extract the array klass type
int klass_offset = oopDesc::klass_offset_in_bytes();
Node* p = basic_plus_adr( ary, ary, klass_offset );
// p's type is array-of-OOPS plus klass_offset
Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
// Get the array klass
const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();
// array_klass's type is generally INexact array-of-oop. Heroically
// cast the array klass to EXACT array and uncommon-trap if the cast
// fails.
bool always_see_exact_class = false;
if (MonomorphicArrayCheck
&& !too_many_traps(Deoptimization::Reason_array_check)) {
always_see_exact_class = true;
// (If no MDO at all, hope for the best, until a trap actually occurs.)
}
// Is the array klass exactly its defined type?
if (always_see_exact_class && !tak->klass_is_exact()) {
// Make a constant out of the inexact array klass
const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
Node* con = makecon(extak);
Node* cmp = _gvn.transform(new (C) CmpPNode( array_klass, con ));
Node* bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::eq ));
Node* ctrl= control();
{ BuildCutout unless(this, bol, PROB_MAX);
uncommon_trap(Deoptimization::Reason_array_check,
Deoptimization::Action_maybe_recompile,
tak->klass());
}
if (stopped()) { // MUST uncommon-trap?
set_control(ctrl); // Then Don't Do It, just fall into the normal checking
} else { // Cast array klass to exactness:
// Use the exact constant value we know it is.
replace_in_map(array_klass,con);
CompileLog* log = C->log();
if (log != NULL) {
log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
log->identify(tak->klass()));
}
array_klass = con; // Use cast value moving forward
}
}
// Come here for polymorphic array klasses
// Extract the array element class
int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
// Check (the hard way) and throw if not a subklass.
// Result is ignored, we just need the CFG effects.
gen_checkcast( obj, a_e_klass );
}
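The BuildCutout block reads as "unless bol holds, take the trap". At runtime the heroic check behaves roughly like the hand-written sketch below; trap() is a placeholder for the Reason_array_check uncommon trap, and the real test compares klass words loaded from the object header, not the plain pointers used here:

// Hand-written sketch of Example 3's monomorphic-array check; not real code.
extern void trap();  // placeholder: deoptimize with Action_maybe_recompile

void store_check_sketch(const void* array_klass, const void* exact_klass_con) {
  if (array_klass != exact_klass_con) { // CmpP + Bool(BoolTest::eq); PROB_MAX favors equality
    trap();                             // BuildCutout routes this unlikely branch
  }
  // Past this point the compiler may treat the array klass as the exact
  // constant, which is what replace_in_map(array_klass, con) records.
}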
Example 4:
~CE_Eliminator() {
CompileLog* log = _hir->compilation()->log();
if (log != NULL)
log->clear_context(); // skip marker if nothing was printed
}
Example 5: kit
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
CompileLog* log = kit.C->log();
if (log != NULL) {
log->elem("predicted_intrinsic bci='%d' method='%d'",
jvms->bci(), log->identify(method()));
}
Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
SafePointNode* slow_map = NULL;
JVMState* slow_jvms;
if (slow_ctl != NULL) {
PreserveJVMState pjvms(&kit);
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
kit.add_exception_states_from(slow_jvms);
kit.set_map(slow_jvms->map());
if (!kit.stopped())
slow_map = kit.stop();
}
}
if (kit.stopped()) {
// Predicate is always false.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
// Generate intrinsic code:
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
if (new_jvms == NULL) {
// Intrinsic failed, so use slow code or make a direct call.
if (slow_map == NULL) {
CallGenerator* cg = CallGenerator::for_direct_call(method());
new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
} else {
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
}
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
// Need to merge slow and fast?
if (slow_map == NULL) {
// The fast path is the only path remaining.
return kit.transfer_exceptions_into_jvms();
}
if (kit.stopped()) {
// Intrinsic method threw an exception, so it's just the slow path after all.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
// Finish the diamond.
kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
RegionNode* region = new (kit.C) RegionNode(3);
region->init_req(1, kit.control());
region->init_req(2, slow_map->control());
kit.set_control(gvn.transform(region));
Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
iophi->set_req(2, slow_map->i_o());
kit.set_i_o(gvn.transform(iophi));
kit.merge_memory(slow_map->merged_memory(), region, 2);
uint tos = kit.jvms()->stkoff() + kit.sp();
uint limit = slow_map->req();
for (uint i = TypeFunc::Parms; i < limit; i++) {
// Skip unused stack slots; fast forward to monoff();
if (i == tos) {
i = kit.jvms()->monoff();
if( i >= limit ) break;
}
Node* m = kit.map()->in(i);
Node* n = slow_map->in(i);
if (m != n) {
const Type* t = gvn.type(m)->meet(gvn.type(n));
Node* phi = PhiNode::make(region, m, t);
phi->set_req(2, n);
kit.map()->set_req(i, gvn.transform(phi));
}
}
return kit.transfer_exceptions_into_jvms();
}
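At the source level, the diamond this generator finishes corresponds roughly to the hand-written sketch below; predicate(), intrinsic_body() and slow_call() are placeholder names standing in for _intrinsic->generate_predicate(), the intrinsic's fast path, and the ordinary call generator:

// Hand-written sketch of the control-flow shape built in Example 5.
extern bool predicate();       // placeholder for the generated predicate
extern int  intrinsic_body();  // placeholder for the intrinsic fast path
extern int  slow_call();       // placeholder for the regular call

int dispatch_sketch() {
  int result;
  if (predicate()) {
    result = intrinsic_body(); // fast path
  } else {
    result = slow_call();      // slow path
  }
  // The join is the 3-input RegionNode; i/o, memory and live stack slots
  // are merged with PhiNodes, as the loop over slow_map->req() shows.
  return result;
}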
Example 6: call_node
void LateInlineCallGenerator::do_late_inline() {
// Can't inline it
CallStaticJavaNode* call = call_node();
if (call == NULL || call->outcnt() == 0 ||
call->in(0) == NULL || call->in(0)->is_top()) {
return;
}
const TypeTuple *r = call->tf()->domain();
for (int i1 = 0; i1 < method()->arg_size(); i1++) {
if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
}
if (call->in(TypeFunc::Memory)->is_top()) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
Compile* C = Compile::current();
// Remove inlined methods from Compiler's lists.
if (call->is_macro()) {
C->remove_macro_node(call);
}
// Make a clone of the JVMState that is appropriate to use for driving a parse
JVMState* old_jvms = call->jvms();
JVMState* jvms = old_jvms->clone_shallow(C);
uint size = call->req();
SafePointNode* map = new (C) SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) {
map->init_req(i1, call->in(i1));
}
// Make sure the state is a MergeMem for parsing.
if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
C->initial_gvn()->set_type_bottom(mem);
map->set_req(TypeFunc::Memory, mem);
}
uint nargs = method()->arg_size();
// blow away old call arguments
Node* top = C->top();
for (uint i1 = 0; i1 < nargs; i1++) {
map->set_req(TypeFunc::Parms + i1, top);
}
jvms->set_map(map);
// Make enough space in the expression stack to transfer
// the incoming arguments and return value.
map->ensure_stack(jvms, jvms->method()->max_stack());
for (uint i1 = 0; i1 < nargs; i1++) {
map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
}
// This check is done here because the for_method_handle_inline() method
// needs jvms for the inlined state.
if (!do_late_inline_check(jvms)) {
map->disconnect_inputs(NULL, C);
return;
}
C->print_inlining_insert(this);
CompileLog* log = C->log();
if (log != NULL) {
log->head("late_inline method='%d'", log->identify(method()));
JVMState* p = jvms;
while (p != NULL) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
log->tail("late_inline");
}
// Setup default node notes to be picked up by the inlining
Node_Notes* old_nn = C->default_node_notes();
if (old_nn != NULL) {
Node_Notes* entry_nn = old_nn->clone(C);
entry_nn->set_jvms(jvms);
C->set_default_node_notes(entry_nn);
}
// Now perform the inlining using the synthesized JVMState
JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
if (new_jvms == NULL) return; // no change
if (C->failing()) return;
// Capture any exceptional control flow
GraphKit kit(new_jvms);
// Find the result object
Node* result = C->top();
int result_size = method()->return_type()->size();
if (result_size != 0 && !kit.stopped()) {
result = (result_size == 1) ? kit.pop() : kit.pop_pair();
}
//......... remainder of the code omitted .........
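The head()/elem()/tail() calls above nest elements, so with logging enabled the late-inline record looks roughly like this (method ids and bcis are illustrative):

<late_inline method='1234'>
<jvms bci='7' method='1234'/>
<jvms bci='3' method='1198'/>
</late_inline>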
Example 7: kit
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
// The code we want to generate here is:
//   if (receiver == NULL)
//     uncommon_trap
//   if (predicate(0))
//     do_intrinsic(0)
//   else
//     if (predicate(1))
//       do_intrinsic(1)
//     ...
//     else
//       do_java_comp
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
CompileLog* log = kit.C->log();
if (log != NULL) {
log->elem("predicated_intrinsic bci='%d' method='%d'",
jvms->bci(), log->identify(method()));
}
if (!method()->is_static()) {
// We need an explicit receiver null_check before checking its type in predicate.
// We share a map with the caller, so its JVMS gets adjusted.
Node* receiver = kit.null_check_receiver_before_call(method());
if (kit.stopped()) {
return kit.transfer_exceptions_into_jvms();
}
}
int n_predicates = _intrinsic->predicates_count();
assert(n_predicates > 0, "sanity");
JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
// Region for normal compilation code if intrinsic failed.
Node* slow_region = new (kit.C) RegionNode(1);
int results = 0;
for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
JVMState* old_jvms = kit.jvms();
SafePointNode* old_map = kit.map();
Node* old_io = old_map->i_o();
Node* old_mem = old_map->memory();
Node* old_exc = old_map->next_exception();
#endif
Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
// Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
SafePointNode* new_map = kit.map();
assert(old_io == new_map->i_o(), "generate_predicate should not change i_o");
assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
if (!kit.stopped()) {
PreserveJVMState pjvms(&kit);
// Generate intrinsic code:
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
if (new_jvms == NULL) {
// Intrinsic failed, use normal compilation path for this predicate.
slow_region->add_req(kit.control());
} else {
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
if (!kit.stopped()) {
result_jvms[results++] = kit.jvms();
}
}
}
if (else_ctrl == NULL) {
else_ctrl = kit.C->top();
}
kit.set_control(else_ctrl);
}
if (!kit.stopped()) {
// Final 'else' after predicates.
slow_region->add_req(kit.control());
}
if (slow_region->req() > 1) {
PreserveJVMState pjvms(&kit);
// Generate normal compilation code:
kit.set_control(gvn.transform(slow_region));
JVMState* new_jvms = _cg->generate(kit.sync_jvms());
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(new_jvms != NULL, "must be");
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
if (!kit.stopped()) {
result_jvms[results++] = kit.jvms();
}
}
if (results == 0) {
// All paths ended in uncommon traps.
(void) kit.stop();
return kit.transfer_exceptions_into_jvms();
//......... remainder of the code omitted .........
Example 8: kit
JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
GraphKit kit(jvms);
Compile* C = kit.C;
PhaseGVN& gvn = kit.gvn();
CompileLog* log = C->log();
if (log != NULL) {
log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
}
const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
Node* predicted_mh = kit.makecon(predicted_mh_ptr);
Node* bol = NULL;
int bc = jvms->method()->java_code_at_bci(jvms->bci());
if (bc != Bytecodes::_invokedynamic) {
// This is the selectAlternative idiom for guardWithTest or
// similar idioms.
Node* receiver = kit.argument(0);
// Check if the MethodHandle is the expected one
Node* cmp = gvn.transform(new (C, 3) CmpPNode(receiver, predicted_mh));
bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) );
} else {
// Get the constant pool cache from the caller class.
ciMethod* caller_method = jvms->method();
ciBytecodeStream str(caller_method);
str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
ciCPCache* cpcache = str.get_cpcache();
// Get the offset of the CallSite from the constant pool cache
// pointer.
int index = str.get_method_index();
size_t call_site_offset = cpcache->get_f1_offset(index);
// Load the CallSite object from the constant pool cache.
const TypeOopPtr* cpcache_type = TypeOopPtr::make_from_constant(cpcache); // returns TypeAryPtr of type T_OBJECT
const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass());
Node* cpcache_adr = kit.makecon(cpcache_type);
Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset);
// The oops in the constant pool cache are not compressed; load them as raw pointers.
Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw);
// Load the target MethodHandle from the CallSite object.
const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass());
Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
Node* target_mh = kit.make_load(kit.control(), target_adr, target_type, T_OBJECT);
// Check if the MethodHandle is still the same.
Node* cmp = gvn.transform(new (C, 3) CmpPNode(target_mh, predicted_mh));
bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) );
}
IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
kit.set_control( gvn.transform(new (C, 1) IfTrueNode (iff)));
Node* slow_ctl = gvn.transform(new (C, 1) IfFalseNode(iff));
SafePointNode* slow_map = NULL;
JVMState* slow_jvms;
{ PreserveJVMState pjvms(&kit);
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _if_missed->generate(kit.sync_jvms());
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
kit.add_exception_states_from(slow_jvms);
kit.set_map(slow_jvms->map());
if (!kit.stopped())
slow_map = kit.stop();
}
}
if (kit.stopped()) {
// The instance never matches the desired type; only the slow path remains.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
// Make the hot call:
JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
if (new_jvms == NULL) {
// Inline failed, so make a direct call.
assert(_if_hit->is_inline(), "must have been a failed inline");
CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
new_jvms = cg->generate(kit.sync_jvms());
}
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
// Need to merge slow and fast?
if (slow_map == NULL) {
// The fast path is the only path remaining.
return kit.transfer_exceptions_into_jvms();
}
if (kit.stopped()) {
// Inlined method threw an exception, so it's just the slow path after all.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
//......... remainder of the code omitted .........
Example 9: call_node
void LateInlineCallGenerator::do_late_inline() {
// Can't inline it
if (call_node() == NULL || call_node()->outcnt() == 0 ||
call_node()->in(0) == NULL || call_node()->in(0)->is_top())
return;
CallStaticJavaNode* call = call_node();
// Make a clone of the JVMState that is appropriate to use for driving a parse
Compile* C = Compile::current();
JVMState* jvms = call->jvms()->clone_shallow(C);
uint size = call->req();
SafePointNode* map = new (C, size) SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) {
map->init_req(i1, call->in(i1));
}
// Make sure the state is a MergeMem for parsing.
if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
}
// Make enough space for the expression stack and transfer the incoming arguments
int nargs = method()->arg_size();
jvms->set_map(map);
map->ensure_stack(jvms, jvms->method()->max_stack());
if (nargs > 0) {
for (int i1 = 0; i1 < nargs; i1++) {
map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
}
}
CompileLog* log = C->log();
if (log != NULL) {
log->head("late_inline method='%d'", log->identify(method()));
JVMState* p = jvms;
while (p != NULL) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
log->tail("late_inline");
}
// Setup default node notes to be picked up by the inlining
Node_Notes* old_nn = C->default_node_notes();
if (old_nn != NULL) {
Node_Notes* entry_nn = old_nn->clone(C);
entry_nn->set_jvms(jvms);
C->set_default_node_notes(entry_nn);
}
// Now perform the inlining using the synthesized JVMState
JVMState* new_jvms = _inline_cg->generate(jvms);
if (new_jvms == NULL) return; // no change
if (C->failing()) return;
// Capture any exceptional control flow
GraphKit kit(new_jvms);
// Find the result object
Node* result = C->top();
int result_size = method()->return_type()->size();
if (result_size != 0 && !kit.stopped()) {
result = (result_size == 1) ? kit.pop() : kit.pop_pair();
}
kit.replace_call(call, result);
}
Example 10: peek
//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {
// Shorthand access to array store elements without popping them.
Node *obj = peek(0);
Node *idx = peek(1);
Node *ary = peek(2);
if (_gvn.type(obj) == TypePtr::NULL_PTR) {
// There's never a type check on null values.
// This cutout lets us avoid the uncommon_trap(Reason_array_check)
// below, which turns into a performance liability if the
// gen_checkcast folds up completely.
return;
}
// Extract the array klass type
int klass_offset = oopDesc::klass_offset_in_bytes();
Node* p = basic_plus_adr( ary, ary, klass_offset );
// p's type is array-of-OOPS plus klass_offset
Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
// Get the array klass
const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();
// The type of array_klass is usually INexact array-of-oop. Heroically
// cast array_klass to EXACT array and uncommon-trap if the cast fails.
// Make a constant out of the inexact array klass, but use it only if the cast
// succeeds.
bool always_see_exact_class = false;
if (MonomorphicArrayCheck
&& !too_many_traps(Deoptimization::Reason_array_check)
&& !tak->klass_is_exact()
&& tak != TypeKlassPtr::OBJECT) {
// Regarding the fourth condition in the if-statement from above:
//
// If the compiler has determined that the type of array 'ary' (represented
// by 'array_klass') is java/lang/Object, the compiler must not assume that
// the array 'ary' is monomorphic.
//
// If 'ary' were of type java/lang/Object, this arraystore would have to fail,
// because it is not possible to perform an arraystore into an object that is not
// a "proper" array.
//
// Therefore, let's obtain at runtime the type of 'ary' and check if we can still
// successfully perform the store.
//
// The implementation reasons for the condition are the following:
//
// java/lang/Object is the superclass of all arrays, but it is represented by the VM
// as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
// 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
//
// See issue JDK-8057622 for details.
always_see_exact_class = true;
// (If no MDO at all, hope for the best, until a trap actually occurs.)
// Make a constant out of the inexact array klass
const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
Node* con = makecon(extak);
Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
Node* ctrl= control();
{ BuildCutout unless(this, bol, PROB_MAX);
uncommon_trap(Deoptimization::Reason_array_check,
Deoptimization::Action_maybe_recompile,
tak->klass());
}
if (stopped()) { // MUST uncommon-trap?
set_control(ctrl); // Then Don't Do It, just fall into the normal checking
} else { // Cast array klass to exactness:
// Use the exact constant value we know it is.
replace_in_map(array_klass,con);
CompileLog* log = C->log();
if (log != NULL) {
log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
log->identify(tak->klass()));
}
array_klass = con; // Use cast value moving forward
}
}
// Come here for polymorphic array klasses
// Extract the array element class
int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
// We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
// we must set a control edge from the IfTrue node created by the uncommon_trap above to the
// LoadKlassNode.
Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
immutable_memory(), p2, tak));
// Check (the hard way) and throw if not a subklass.
// Result is ignored, we just need the CFG effects.
gen_checkcast(obj, a_e_klass);
}
Example 11: CHECK_BAILOUT
void Compilation::build_hir() {
CHECK_BAILOUT();
// setup ir
CompileLog* log = this->log();
if (log != NULL) {
log->begin_head("parse method='%d' ",
log->identify(_method));
log->stamp();
log->end_head();
}
_hir = new IR(this, method(), osr_bci());
if (log) log->done("parse");
if (!_hir->is_valid()) {
bailout("invalid parsing");
return;
}
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "After Generation of HIR", true, false);
}
#endif
#ifndef PRODUCT
if (PrintCFG || PrintCFG0) { tty->print_cr("CFG after parsing"); _hir->print(true); }
if (PrintIR || PrintIR0 ) { tty->print_cr("IR after parsing"); _hir->print(false); }
#endif
_hir->verify();
if (UseC1Optimizations) {
NEEDS_CLEANUP
// optimization
PhaseTraceTime timeit(_t_optimize_blocks);
_hir->optimize_blocks();
}
_hir->verify();
_hir->split_critical_edges();
#ifndef PRODUCT
if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after optimizations"); _hir->print(true); }
if (PrintIR || PrintIR1 ) { tty->print_cr("IR after optimizations"); _hir->print(false); }
#endif
_hir->verify();
// compute block ordering for code generation
// the control flow must not be changed from here on
_hir->compute_code();
if (UseGlobalValueNumbering) {
// No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects.
int instructions = Instruction::number_of_instructions();
GlobalValueNumbering gvn(_hir);
assert(instructions == Instruction::number_of_instructions(),
"shouldn't have created an instructions");
}
_hir->verify();
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
}
#endif
if (RangeCheckElimination) {
if (_hir->osr_entry() == NULL) {
PhaseTraceTime timeit(_t_rangeCheckElimination);
RangeCheckElimination::eliminate(_hir);
}
}
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
}
#endif
if (UseC1Optimizations) {
// Loop invariant code motion reorders instructions, and range
// check elimination adds new instructions, so run null check
// elimination after both.
NEEDS_CLEANUP
// optimization
PhaseTraceTime timeit(_t_optimize_null_checks);
_hir->eliminate_null_checks();
}
_hir->verify();
// compute use counts after global value numbering
_hir->compute_use_counts();
#ifndef PRODUCT
//......... remainder of the code omitted .........
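The begin_head()/stamp()/end_head() sequence at the top of build_hir() opens a timestamped <parse> element, and the later done("parse") call closes it. The corresponding log fragment looks roughly like this (ids and timestamps are illustrative):

<parse method='659' stamp='0.123'>
...
<parse_done stamp='0.145'/>
</parse>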
Example 12: PhaseTraceTime
PhaseTraceTime(TimerName timer)
: TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
if (Compilation::current() != NULL) {
_log = Compilation::current()->log();
}
if (_log != NULL) {
_log->begin_head("phase name='%s'", timer_name[timer]);
_log->stamp();
_log->end_head();
}
}
Example 13:
~PhaseTraceTime() {
if (_log != NULL)
_log->done("phase");
}
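Examples 12 and 13 form an RAII pair: the constructor opens a timestamped <phase> element and the destructor closes it via done("phase"). Typical usage brackets a pass in its own scope, exactly as Example 11 does with its PhaseTraceTime locals; a minimal sketch of that pattern:

// Minimal sketch of the RAII pair from Examples 12 and 13, following the
// usage already shown in Example 11 (_t_optimize_blocks is one of the
// predefined timer names).
{
  PhaseTraceTime timeit(_t_optimize_blocks); // emits <phase name='...' stamp='...'>
  _hir->optimize_blocks();                   // the timed, logged work
}                                            // destructor emits the closing phase record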