This article collects typical usage examples of the C++ method GrowableArray::at_grow. If you are unsure what GrowableArray::at_grow does or how to call it, the curated code examples below may help. You can also explore further usage examples of its enclosing class, GrowableArray.
Four code examples of GrowableArray::at_grow are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
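Before the examples, a quick recap of the method's semantics: GrowableArray::at_grow(idx) returns the element at index idx, first extending the array with default-valued slots if idx lies beyond the current length; the companion at_put_grow (seen in Example 3) stores a value the same way. The following minimal, self-contained sketch mimics that behavior so the examples are easier to follow. The class MiniGrowableArray and its exact signatures are illustrative assumptions, not HotSpot's real GrowableArray API.
#include <cstdio>
#include <vector>

// Minimal illustrative stand-in (hypothetical class, not HotSpot's GrowableArray):
// it only models the at_grow / at_put_grow / adr_at semantics used in the examples.
template <typename E>
class MiniGrowableArray {
  std::vector<E> _data;
public:
  int length() const { return (int)_data.size(); }
  // Return the element at idx, first growing the array with 'fill'-valued slots
  // so that idx becomes a valid index.
  E& at_grow(int idx, const E& fill = E()) {
    if (idx >= (int)_data.size()) _data.resize(idx + 1, fill);
    return _data[idx];
  }
  // Store elem at idx, growing the array with 'fill' as needed.
  void at_put_grow(int idx, const E& elem, const E& fill = E()) {
    at_grow(idx, fill) = elem;
  }
  // Address of an already-existing element (no growth here).
  E* adr_at(int idx) { return &_data[idx]; }
};

int main() {
  MiniGrowableArray<int> latency;
  latency.at_put_grow(5, 42);   // grows to length 6; slots 0..4 hold the default 0
  std::printf("len=%d latency[5]=%d\n", latency.length(), latency.at_grow(5));
  return 0;
}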
Example 1: ptnode_adr
// address of an element in _nodes. Used when the element is to be modified
PointsToNode *ptnode_adr(uint idx) {
  if ((uint)_nodes->length() <= idx) {
    // expand _nodes array
    PointsToNode dummy = _nodes->at_grow(idx);
  }
  return _nodes->adr_at(idx);
}
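Note that at_grow is called here purely for its side effect: copying the result into the local dummy (which is immediately discarded) forces _nodes to grow until idx is a valid index, so the following adr_at(idx) can safely return a pointer to the element for in-place modification.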
Example 2: schedule_local
//------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler.
bool Block::schedule_local(Matcher &matcher, Block_Array &bbs, int *ready_cnt, VectorSet &next_call, GrowableArray<uint> &node_latency) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections. We leave
  // these alone. PhiNodes and ParmNodes are made to follow the block start
  // Node. Everything else gets topo-sorted.

#ifndef PRODUCT
  if (TraceOptoPipelining) {
    tty->print("# before schedule_local\n");
    for (uint i = 0; i < _nodes.size(); i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->print("\n");
  }
#endif

  // RootNode is already sorted
  if( _nodes.size() == 1 ) return true;

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) {     // Scan for Phi
    Node *n = _nodes[i];
    if( n->is_Phi() ||                // Found a PhiNode or ParmNode
        (n->is_Proj() && n->in(0) == head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      _nodes.map(i,_nodes[phi_cnt]);
      _nodes.map(phi_cnt++,n);        // swap Phi/Parm up front
    } else {                          // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();            // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && bbs[m->_idx] == this && !m->is_top() )
          local++;                    // One more block-local input
      }
      ready_cnt[n->_idx] = local;     // Count em up

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      MachNode *m = n->is_Mach();
      if( UseConcMarkSweepGC ) {
        if( m && m->ideal_Opcode() == Op_StoreCM ) {
          // Note: Required edges with an index greater than oper_input_base
          // are not supported by the allocator.
          // Note2: Can only depend on unmatched edge being last,
          // can not depend on its absolute position.
          Node *oop_store = n->in(n->req() - 1);
          n->del_req(n->req() - 1);
          n->add_prec(oop_store);
          assert(bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
        }
      }
      if( m && m->ideal_Opcode() == Op_MemBarAcquire ) {
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
    ready_cnt[_nodes[i2]->_idx] = 0;

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {    // For all pre-scheduled
    Node *n = _nodes[i3];             // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if( bbs[m->_idx] == this )      // Local-block user
        ready_cnt[m->_idx]--;         // Fix ready count
    }
  }

  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist
    Node *m = _nodes[i4];
    if( !ready_cnt[m->_idx] )         // Zero ready count?
      worklist.push(m);               // Then on to worklist!
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(_nodes[0], next_call, bbs);

#ifndef PRODUCT
  if (TraceOptoPipelining) {
    for (uint j=0; j<_nodes.size(); j++) {
      Node *n = _nodes[j];
      int idx = n->_idx;
      tty->print("# ready cnt:%3d ", ready_cnt[idx]);
      tty->print("latency:%3d ", node_latency.at_grow(idx));
      tty->print("%4d: %s\n", idx, n->Name());
    }
//......... part of the code is omitted here .........
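In this example at_grow appears in the tracing code: node_latency.at_grow(idx) reads the latency recorded for node idx, and because a node's _idx may exceed the table's current length, at_grow quietly extends the table with default-valued entries instead of performing an out-of-range read.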
Example 3: implicit_null_check
//......... part of the code is omitted here .........
          Node *n = b->_nodes[k];
          if( n->check_for_anti_dependence() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->_nodes.size() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check allpaths)
        if( b->num_preds() != 2 ) break;
        b = bbs[b->pred(1)->_idx]; // Move up to predecessor block
      }
      if( b != this ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    MachNode *e = mb->end()->is_Mach();
    if( e && e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate! Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if( !best ||
        bbs[mach->_idx]->_dom_depth < bbs[best->_idx]->_dom_depth ) {
      best = mach;
      bidx = vidx;
    }
  }
  // No candidate!
  if( !best ) return;

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = bbs[best->_idx];
  old_block->find_remove(best);
  add_inst(best);
  bbs.map(best->_idx,this);
  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->_nodes[0])
    best->set_req(0, _nodes[0]);

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->Opcode() == Op_MachProj ) {
      bbs[n->_idx]->find_remove(n);
      add_inst(n);
      bbs.map(n->_idx,this);
    }
  }

  // proj==Op_True --> ne test; proj==Op_False --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq. If we see a IfTrue projection
  // then we are replacing a 'ne' test with a 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = _nodes[end_idx()+1];
    Node *tmp2 = _nodes[end_idx()+2];
    _nodes.map(end_idx()+1, tmp2);
    _nodes.map(end_idx()+2, tmp1);
    Node *tmp = new (1) Node(1);
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new MachNullCheckNode(old_tst->in(0),best,bidx);
  _nodes.map(end_idx(),nul_chk);
  bbs.map(nul_chk->_idx,this);
  // Redirect users of old_test to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
    old_tst->set_req(i3, NULL);

  latency.at_put_grow(nul_chk->_idx, nul_chk->latency_from_uses(bbs, latency));
  latency.at_put_grow(best ->_idx, best ->latency_from_uses(bbs, latency));

#ifndef PRODUCT
  if (TraceOptoPipelining) {
    tty->print("# implicit_null_check: latency %4d for ", latency.at_grow(best->_idx));
    best->fast_dump();
    tty->print("# implicit_null_check: latency %4d for ", latency.at_grow(nul_chk->_idx));
    nul_chk->fast_dump();
  }
#endif
}
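Here at_put_grow and at_grow work as a pair: after the memory op is hoisted and the MachNullCheckNode is created, at_put_grow records their recomputed latencies (growing the latency table if either index is new), and the trace output then reads the values back with at_grow.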
Example 4: hidden_alias
bool hidden_alias(Node *n) {
  if (_collecting)
    return true;
  PointsToNode ptn = _nodes->at_grow(n->_idx);
  return (ptn.escape_state() != PointsToNode::NoEscape) || ptn._hidden_alias;
}
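Unlike Example 1, which takes the address of the grown slot, this accessor copies the PointsToNode returned by at_grow into a local; if n->_idx has not been recorded yet, the array is grown on the spot and the query still returns a well-defined, default-constructed entry.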