This article collects typical usage examples of the C++ SUnit class (LLVM's scheduling-DAG node). If you are wondering what the SUnit class is for, how it is used, or what real code that uses it looks like, the curated examples below may help.
Fifteen code examples of the SUnit class are shown below, ordered by popularity by default.
Example 1: assert
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
assert(MO.isDef() && "expect physreg def");
// Ask the target if address-backscheduling is desirable, and if so how much.
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
Alias.isValid(); ++Alias) {
if (!Uses.contains(*Alias))
continue;
std::vector<PhysRegSUOper> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i].SU;
if (UseSU == SU)
continue;
SDep dep(SU, SDep::Data, 1, *Alias);
// Adjust the dependence latency using operand def/use information,
// then allow the target to perform its own adjustments.
int UseOp = UseList[i].OpIdx;
MachineInstr *RegUse = UseOp < 0 ? 0 : UseSU->getInstr();
dep.setLatency(
SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
RegUse, UseOp, /*FindMin=*/false));
dep.setMinLatency(
SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
RegUse, UseOp, /*FindMin=*/true));
ST.adjustSchedDependency(SU, UseSU, dep);
UseSU->addPred(dep);
}
}
}
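A short self-contained sketch of the pattern above may help: for a physical-register def, walk every aliasing register and attach a data edge to each recorded use. The Node and Edge types, the aliasesOf helper, and the register numbers are invented for illustration; they stand in for SUnit, SDep, MCRegAliasIterator and the Uses map rather than being the LLVM API.

// A minimal, self-contained sketch of the alias-walking pattern above.
// Node, Edge, and aliasesOf are invented for illustration; they are not
// part of the LLVM API.
#include <cstdio>
#include <map>
#include <vector>

struct Node;
struct Edge { Node *Def; Node *Use; unsigned Latency; };

struct Node {
  int Id;
  std::vector<Edge> Preds;   // edges from defs this node depends on
};

// Hypothetical alias table: a register maps to every register it overlaps
// with (including itself), playing the role of MCRegAliasIterator.
static std::vector<int> aliasesOf(int Reg) {
  if (Reg == 1) return {1, 2};   // pretend reg 1 overlaps reg 2
  return {Reg};
}

int main() {
  // Uses[R] lists the nodes that read register R, like the Uses map above.
  std::map<int, std::vector<Node*>> Uses;
  Node Load{0, {}}, Add{1, {}};
  Uses[2].push_back(&Add);       // Add reads reg 2

  // Load defines reg 1; walk every alias and attach a data edge from the
  // def to each recorded use, as addPhysRegDataDeps does.
  Node *DefSU = &Load;
  for (int Alias : aliasesOf(1)) {
    auto It = Uses.find(Alias);
    if (It == Uses.end()) continue;
    for (Node *UseSU : It->second) {
      if (UseSU == DefSU) continue;
      UseSU->Preds.push_back({DefSU, UseSU, /*Latency=*/1});
    }
  }
  std::printf("Add has %zu pred edge(s)\n", Add.Preds.size()); // prints 1
  return 0;
}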
Example 2: addVRegDefDeps
/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
const MachineInstr *MI = SU->getInstr();
unsigned Reg = MI->getOperand(OperIdx).getReg();
// Singly defined vregs do not have output/anti dependencies.
// The current operand is a def, so we have at least one.
// Check here if there are any others...
if (MRI.hasOneDef(Reg))
return;
// Add output dependence to the next nearest def of this vreg.
//
// Unless this definition is dead, the output dependence should be
// transitively redundant with antidependencies from this definition's
// uses. We're conservative for now until we have a way to guarantee the uses
// are not eliminated sometime during scheduling. The output dependence edge
// is also useful if output latency exceeds def-use latency.
VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
if (DefI == VRegDefs.end())
VRegDefs.insert(VReg2SUnit(Reg, SU));
else {
SUnit *DefSU = DefI->SU;
if (DefSU != SU && DefSU != &ExitSU) {
unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
DefSU->getInstr());
DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
}
DefI->SU = SU;
}
}
Example 3: EmitNoop
/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
DenseMap<SDValue, unsigned> VRBaseMap;
DenseMap<SUnit*, unsigned> CopyVRBaseMap;
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
SUnit *SU = Sequence[i];
if (!SU) {
// Null SUnit* is a noop.
EmitNoop();
continue;
}
// For pre-regalloc scheduling, create instructions corresponding to the
// SDNode and any flagged SDNodes and append them to the block.
if (!SU->getNode()) {
// Emit a copy.
EmitPhysRegCopy(SU, CopyVRBaseMap);
continue;
}
SmallVector<SDNode *, 4> FlaggedNodes;
for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
N = N->getFlaggedNode())
FlaggedNodes.push_back(N);
while (!FlaggedNodes.empty()) {
EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned, VRBaseMap);
FlaggedNodes.pop_back();
}
EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned, VRBaseMap);
}
return BB;
}
Example 4: ComputeHeight
/// ComputeHeight - Calculate the maximal path from the node to the entry.
///
void SUnit::ComputeHeight() {
SmallVector<SUnit*, 8> WorkList;
WorkList.push_back(this);
do {
SUnit *Cur = WorkList.back();
bool Done = true;
unsigned MaxSuccHeight = 0;
for (SUnit::const_succ_iterator I = Cur->Succs.begin(),
E = Cur->Succs.end(); I != E; ++I) {
SUnit *SuccSU = I->getSUnit();
if (SuccSU->isHeightCurrent)
MaxSuccHeight = std::max(MaxSuccHeight,
SuccSU->Height + I->getLatency());
else {
Done = false;
WorkList.push_back(SuccSU);
}
}
if (Done) {
WorkList.pop_back();
if (MaxSuccHeight != Cur->Height) {
Cur->setHeightDirty();
Cur->Height = MaxSuccHeight;
}
Cur->isHeightCurrent = true;
}
} while (!WorkList.empty());
}
Example 5: ComputeDepth
/// ComputeDepth - Calculate the maximal path from the node to the exit.
///
void SUnit::ComputeDepth() {
SmallVector<SUnit*, 8> WorkList;
WorkList.push_back(this);
do {
SUnit *Cur = WorkList.back();
bool Done = true;
unsigned MaxPredDepth = 0;
for (SUnit::const_pred_iterator I = Cur->Preds.begin(),
E = Cur->Preds.end(); I != E; ++I) {
SUnit *PredSU = I->getSUnit();
if (PredSU->isDepthCurrent)
MaxPredDepth = std::max(MaxPredDepth,
PredSU->Depth + I->getLatency());
else {
Done = false;
WorkList.push_back(PredSU);
}
}
if (Done) {
WorkList.pop_back();
if (MaxPredDepth != Cur->Depth) {
Cur->setDepthDirty();
Cur->Depth = MaxPredDepth;
}
Cur->isDepthCurrent = true;
}
} while (!WorkList.empty());
}
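ComputeHeight and ComputeDepth use the same scheme: an explicit worklist replaces recursion, and a node is only finalized once all of its neighbours carry up-to-date values, yielding the longest latency-weighted path. Below is a minimal sketch of the height direction under those assumptions; the Node type and the hard-coded edges are invented for illustration, and the dirty/current caching of the real SUnit is reduced to a single flag.

// Longest path to a leaf, computed with a worklist instead of recursion,
// mirroring SUnit::ComputeHeight. Node and the example graph are invented.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Node {
  std::vector<std::pair<Node*, unsigned>> Succs; // (successor, edge latency)
  unsigned Height = 0;
  bool HeightCurrent = false;
};

static void computeHeight(Node *N) {
  std::vector<Node*> WorkList{N};
  do {
    Node *Cur = WorkList.back();
    bool Done = true;
    unsigned MaxSucc = 0;
    for (auto &S : Cur->Succs) {
      if (S.first->HeightCurrent)
        MaxSucc = std::max(MaxSucc, S.first->Height + S.second);
      else {
        Done = false;
        WorkList.push_back(S.first); // compute the successor first
      }
    }
    if (Done) {                      // all successors are up to date
      WorkList.pop_back();
      Cur->Height = MaxSucc;
      Cur->HeightCurrent = true;
    }
  } while (!WorkList.empty());
}

int main() {
  Node A, B, C;
  A.Succs = {{&B, 2}};
  B.Succs = {{&C, 3}};
  computeHeight(&A);
  std::printf("height(A) = %u\n", A.Height); // 2 + 3 = 5
  return 0;
}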
Example 6: addPred
/// addPred - This adds the specified edge as a pred of the current node if
/// not already. It also adds the current node as a successor of the
/// specified node.
void SUnit::addPred(const SDep &D) {
// If this node already has this dependence, don't add a redundant one.
for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
if (Preds[i] == D)
return;
// Now add a corresponding succ to N.
SDep P = D;
P.setSUnit(this);
SUnit *N = D.getSUnit();
// Update the bookkeeping.
if (D.getKind() == SDep::Data) {
++NumPreds;
++N->NumSuccs;
}
if (!N->isScheduled)
++NumPredsLeft;
if (!isScheduled)
++N->NumSuccsLeft;
Preds.push_back(D);
N->Succs.push_back(P);
if (P.getLatency() != 0) {
this->setDepthDirty();
N->setHeightDirty();
}
}
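The essential invariant is that one logical edge is stored twice, once in this node's Preds and once, mirrored, in the predecessor's Succs, while the NumPredsLeft/NumSuccsLeft counters track how many edges are still pending for list scheduling. A minimal sketch of that bookkeeping follows (removePred in Example 7 simply reverses it); the Unit and Dep types are invented for illustration and omit the dependence kind, the latency dirtying, and the assertions of the real SUnit::addPred.

// Mirrored pred/succ bookkeeping, reduced to its core. Unit and Dep are
// invented stand-ins for SUnit and SDep.
#include <cstdio>
#include <vector>

struct Unit;
struct Dep { Unit *Other; unsigned Latency; };

struct Unit {
  std::vector<Dep> Preds, Succs;
  unsigned NumPredsLeft = 0, NumSuccsLeft = 0;

  // Record "D.Other feeds this", mirroring SUnit::addPred.
  void addPred(const Dep &D) {
    Preds.push_back(D);                          // this depends on D.Other
    D.Other->Succs.push_back({this, D.Latency}); // reverse edge on the pred
    ++NumPredsLeft;                              // one more pred to wait for
    ++D.Other->NumSuccsLeft;                     // pred gains a pending succ
  }
};

int main() {
  Unit Def, Use;
  Use.addPred({&Def, 1});
  std::printf("Use waits on %u pred(s), Def feeds %u succ(s)\n",
              Use.NumPredsLeft, Def.NumSuccsLeft); // 1 and 1
  return 0;
}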
Example 7: removePred
/// removePred - This removes the specified edge as a pred of the current
/// node if it exists. It also removes the current node as a successor of
/// the specified node.
void SUnit::removePred(const SDep &D) {
// Find the matching predecessor.
for (SmallVector<SDep, 4>::iterator I = Preds.begin(), E = Preds.end();
I != E; ++I)
if (*I == D) {
bool FoundSucc = false;
// Find the corresponding successor in N.
SDep P = D;
P.setSUnit(this);
SUnit *N = D.getSUnit();
for (SmallVector<SDep, 4>::iterator II = N->Succs.begin(),
EE = N->Succs.end(); II != EE; ++II)
if (*II == P) {
FoundSucc = true;
N->Succs.erase(II);
break;
}
assert(FoundSucc && "Mismatching preds / succs lists!");
Preds.erase(I);
// Update the bookkeeping.
if (P.getKind() == SDep::Data) {
--NumPreds;
--N->NumSuccs;
}
if (!N->isScheduled)
--NumPredsLeft;
if (!isScheduled)
--N->NumSuccsLeft;
if (P.getLatency() != 0) {
this->setDepthDirty();
N->setHeightDirty();
}
return;
}
}
Example 8: addPred
/// addPred - This adds the specified edge as a pred of the current node if
/// not already. It also adds the current node as a successor of the
/// specified node.
bool SUnit::addPred(const SDep &D) {
// If this node already has this dependence, don't add a redundant one.
for (SmallVector<SDep, 4>::const_iterator I = Preds.begin(), E = Preds.end();
I != E; ++I)
if (*I == D)
return false;
// Now add a corresponding succ to N.
SDep P = D;
P.setSUnit(this);
SUnit *N = D.getSUnit();
// Update the bookkeeping.
if (D.getKind() == SDep::Data) {
assert(NumPreds < UINT_MAX && "NumPreds will overflow!");
assert(N->NumSuccs < UINT_MAX && "NumSuccs will overflow!");
++NumPreds;
++N->NumSuccs;
}
if (!N->isScheduled) {
assert(NumPredsLeft < UINT_MAX && "NumPredsLeft will overflow!");
++NumPredsLeft;
}
if (!isScheduled) {
assert(N->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
++N->NumSuccsLeft;
}
Preds.push_back(D);
N->Succs.push_back(P);
if (P.getLatency() != 0) {
this->setDepthDirty();
N->setHeightDirty();
}
return true;
}
Example 9: assert
/// Select a bundle for the current cycle. The selected instructions are
/// put into bundle in the correct issue order. If no instruction can be
/// issued, false is returned.
bool PatmosLatencyQueue::selectBundle(std::vector<SUnit*> &Bundle)
{
if (AvailableQueue.empty()) return false;
// Find best bundle:
// - Ensure that instructions that MUST be scheduled go into the bundle.
// - find best pair of available programs, e.g. two stores with exclusive
// predicates and highest ILP/.., but only if at least one of those instr.
// has high priority.
// - find best instructions that fit into the bundle with highest ILP/..
//
// Instructions are built up into a bundle in Bundle. Instructions are removed
// from AvailableQueue in scheduled() once the instruction is actually picked.
unsigned CurrWidth = 0;
// If the bundle is not empty, we should calculate the initial width
assert(Bundle.empty());
std::vector<bool> Selected;
Selected.resize(AvailableQueue.size());
// Make sure that all instructions with ScheduleLow flag go into the bundle.
for (unsigned i = 0; i < AvailableQueue.size() && CurrWidth < IssueWidth; i++)
{
SUnit *SU = AvailableQueue[i];
if (!SU->isScheduleLow) break;
if (addToBundle(Bundle, SU, CurrWidth)) {
Selected[i] = true;
}
}
// Check if any of the highest <IssueWidth> instructions can be
// scheduled only with a single other instruction in this queue, or if there
// is any instruction in the queue that can only be scheduled with the highest
// ones. Pick them in any case
// TODO magic goes here..
// Try to fill up the bundle with instructions from the queue by best effort
for (unsigned i = 0; i < AvailableQueue.size() && CurrWidth < IssueWidth; i++)
{
if (Selected[i]) continue;
SUnit *SU = AvailableQueue[i];
// check the width. ignore the width for the first instruction to allow
// ALUl even when bundling is disabled.
unsigned width = PII.getIssueWidth(SU->getInstr());
if (!Bundle.empty() && CurrWidth + width > IssueWidth) continue;
addToBundle(Bundle, SU, CurrWidth);
}
return true;
}
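Stripped of the Patmos-specific width rules and the isScheduleLow pass, selectBundle is a greedy fill of an issue-width budget from a priority-ordered queue. The sketch below keeps only that core loop; the Instr type, the widths, and the IssueWidth value are invented for illustration and do not model ALUl handling or predicate exclusivity.

// Greedy bundle filling under an issue-width budget. Instr and the widths
// are invented; the real code also honours isScheduleLow and bundling rules.
#include <cstdio>
#include <vector>

struct Instr { const char *Name; unsigned Width; };

static std::vector<Instr*> selectBundle(const std::vector<Instr*> &Queue,
                                        unsigned IssueWidth) {
  std::vector<Instr*> Bundle;
  unsigned CurrWidth = 0;
  for (Instr *I : Queue) {
    // Skip anything that no longer fits; keep scanning for narrower ops.
    if (!Bundle.empty() && CurrWidth + I->Width > IssueWidth)
      continue;
    Bundle.push_back(I);
    CurrWidth += I->Width;
    if (CurrWidth >= IssueWidth)
      break;
  }
  return Bundle;
}

int main() {
  Instr A{"aluL", 2}, B{"store", 1}, C{"add", 1};
  std::vector<Instr*> Queue{&A, &B, &C};
  for (Instr *I : selectBundle(Queue, /*IssueWidth=*/2))
    std::printf("%s ", I->Name);                 // prints "aluL"
  std::printf("\n");
  return 0;
}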
Example 10: while
void LatencyPriorityQueue::dump(ScheduleDAG *DAG) const {
LatencyPriorityQueue q = *this;
while (!q.empty()) {
SUnit *su = q.pop();
dbgs() << "Height " << su->getHeight() << ": ";
su->dump(DAG);
}
}
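The dump operates on a copy precisely because popping a priority queue is destructive. The same idiom works with std::priority_queue, as this minimal sketch shows (the values are arbitrary).

// Dump a priority queue non-destructively by copying it and popping the copy.
#include <cstdio>
#include <queue>

int main() {
  std::priority_queue<int> Q;
  Q.push(3); Q.push(7); Q.push(5);

  std::priority_queue<int> Copy = Q;   // inspect the copy, not Q itself
  while (!Copy.empty()) {
    std::printf("%d ", Copy.top());    // prints 7 5 3
    Copy.pop();
  }
  std::printf("\n(original still holds %zu elements)\n", Q.size());
  return 0;
}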
Example 11: assert
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
assert(MO.isDef() && "expect physreg def");
// Ask the target if address-backscheduling is desirable, and if so how much.
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
unsigned DataLatency = SU->Latency;
for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
Alias.isValid(); ++Alias) {
if (!Uses.contains(*Alias))
continue;
std::vector<PhysRegSUOper> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i].SU;
if (UseSU == SU)
continue;
MachineInstr *UseMI = UseSU->getInstr();
int UseOp = UseList[i].OpIdx;
unsigned LDataLatency = DataLatency;
// Optionally add in a special extra latency for nodes that
// feed addresses.
// TODO: Perhaps we should get rid of
// SpecialAddressLatency and just move this into
// adjustSchedDependency for the targets that care about it.
if (SpecialAddressLatency != 0 && !UnitLatencies &&
UseSU != &ExitSU) {
const MCInstrDesc &UseMCID = UseMI->getDesc();
int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
if (RegUseIndex >= 0 &&
(UseMI->mayLoad() || UseMI->mayStore()) &&
(unsigned)RegUseIndex < UseMCID.getNumOperands() &&
UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
LDataLatency += SpecialAddressLatency;
}
// Adjust the dependence latency using operand def/use
// information (if any), and then allow the target to
// perform its own adjustments.
SDep dep(SU, SDep::Data, LDataLatency, *Alias);
if (!UnitLatencies) {
unsigned Latency =
TII->computeOperandLatency(InstrItins, SU->getInstr(), OperIdx,
(UseOp < 0 ? 0 : UseMI), UseOp);
dep.setLatency(Latency);
unsigned MinLatency =
TII->computeOperandLatency(InstrItins, SU->getInstr(), OperIdx,
(UseOp < 0 ? 0 : UseMI), UseOp,
/*FindMin=*/true);
dep.setMinLatency(MinLatency);
ST.adjustSchedDependency(SU, UseSU, dep);
}
UseSU->addPred(dep);
}
}
}
Example 12: shouldTFRICallBind
// Check if a call and subsequent A2_tfrpi instructions should maintain
// scheduling affinity. We are looking for the TFRI to be consumed in
// the next instruction. This should help reduce the instances of
// double register pairs being allocated and scheduled before a call
// when not used until after the call. This situation is exacerbated
// by the fact that we allocate the pair from the callee saves list,
// leading to excess spills and restores.
bool HexagonCallMutation::shouldTFRICallBind(const HexagonInstrInfo &HII,
const SUnit &Inst1, const SUnit &Inst2) const {
if (Inst1.getInstr()->getOpcode() != Hexagon::A2_tfrpi)
return false;
// TypeXTYPE are 64 bit operations.
if (HII.getType(Inst2.getInstr()) == HexagonII::TypeXTYPE)
return true;
return false;
}
Example 13: pickAlu
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
SUnit *SU = 0;
IsTopNode = true;
NextInstKind = IDOther;
// check if we might want to switch current clause type
bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
(CurEmitted > InstKindLimit[CurInstKind]) ||
(Available[CurInstKind]->empty());
bool AllowSwitchFromAlu = (CurEmitted > InstKindLimit[CurInstKind]) &&
(!Available[IDFetch]->empty() || !Available[IDOther]->empty());
if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
(!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
// try to pick ALU
SU = pickAlu();
if (SU) {
if (CurEmitted > InstKindLimit[IDAlu])
CurEmitted = 0;
NextInstKind = IDAlu;
}
}
if (!SU) {
// try to pick FETCH
SU = pickOther(IDFetch);
if (SU)
NextInstKind = IDFetch;
}
// try to pick other
if (!SU) {
SU = pickOther(IDOther);
if (SU)
NextInstKind = IDOther;
}
DEBUG(
if (SU) {
dbgs() << "picked node: ";
SU->dump(DAG);
} else {
dbgs() << "NO NODE ";
for (int i = 0; i < IDLast; ++i) {
Available[i]->dump();
Pending[i]->dump();
}
for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
const SUnit &S = DAG->SUnits[i];
if (!S.isScheduled)
S.dump(DAG);
}
}
);
  return SU;
}
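The selection order above boils down to 'try the preferred kind first, then fall back through the remaining buckets and report which kind was taken'. The sketch below reduces it to that core; the integer payloads and the pickFrom helper are invented for illustration, and the real strategy layers the AllowSwitchToAlu/AllowSwitchFromAlu clause conditions on top.

// Pick from the preferred bucket, then fall back, recording the chosen kind.
// Buckets are plain vectors standing in for the strategy's ready queues.
#include <cstdio>
#include <vector>

enum Kind { IDAlu, IDFetch, IDOther, IDLast };

static int pickFrom(std::vector<int> &Bucket) {
  if (Bucket.empty()) return -1;
  int V = Bucket.back();
  Bucket.pop_back();
  return V;
}

int main() {
  std::vector<int> Buckets[IDLast];
  Buckets[IDFetch] = {7};                        // only a fetch is ready

  int Picked = pickFrom(Buckets[IDAlu]);         // try ALU first
  Kind NextKind = IDAlu;
  if (Picked < 0) { Picked = pickFrom(Buckets[IDFetch]); NextKind = IDFetch; }
  if (Picked < 0) { Picked = pickFrom(Buckets[IDOther]); NextKind = IDOther; }

  std::printf("picked %d, next kind %d\n", Picked, NextKind); // 7, kind 1
  return 0;
}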
Example 14: addPred
/// addPred - This adds the specified edge as a pred of the current node if
/// not already. It also adds the current node as a successor of the
/// specified node.
bool SUnit::addPred(const SDep &D) {
// If this node already has this dependence, don't add a redundant one.
for (SmallVector<SDep, 4>::iterator I = Preds.begin(), E = Preds.end();
I != E; ++I) {
if (I->overlaps(D)) {
// Extend the latency if needed. Equivalent to removePred(I) + addPred(D).
if (I->getLatency() < D.getLatency()) {
SUnit *PredSU = I->getSUnit();
// Find the corresponding successor in N.
SDep ForwardD = *I;
ForwardD.setSUnit(this);
for (SmallVector<SDep, 4>::iterator II = PredSU->Succs.begin(),
EE = PredSU->Succs.end(); II != EE; ++II) {
if (*II == ForwardD) {
II->setLatency(D.getLatency());
break;
}
}
I->setLatency(D.getLatency());
}
return false;
}
}
// Now add a corresponding succ to N.
SDep P = D;
P.setSUnit(this);
SUnit *N = D.getSUnit();
// Update the bookkeeping.
if (D.getKind() == SDep::Data) {
assert(NumPreds < UINT_MAX && "NumPreds will overflow!");
assert(N->NumSuccs < UINT_MAX && "NumSuccs will overflow!");
++NumPreds;
++N->NumSuccs;
}
if (!N->isScheduled) {
assert(NumPredsLeft < UINT_MAX && "NumPredsLeft will overflow!");
++NumPredsLeft;
}
if (!isScheduled) {
assert(N->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
++N->NumSuccsLeft;
}
Preds.push_back(D);
N->Succs.push_back(P);
if (P.getLatency() != 0) {
this->setDepthDirty();
N->setHeightDirty();
}
return true;
}
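Compared with Example 8, this version treats an overlapping edge as an update rather than a duplicate: only the latency is raised, on both mirrored copies, and false is returned so the caller knows no new edge appeared. A minimal sketch of that behaviour follows; the Unit and Dep types are invented for illustration, and the real SDep::overlaps also compares the dependence kind and register.

// Extend an existing edge's latency instead of adding a duplicate edge.
// Unit and Dep are invented stand-ins for SUnit and SDep.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Unit;
struct Dep {
  Unit *Other;
  unsigned Latency;
  bool overlaps(const Dep &D) const { return Other == D.Other; }
};

struct Unit {
  std::vector<Dep> Preds, Succs;

  // Returns true only if a genuinely new edge was added.
  bool addPred(const Dep &D) {
    for (Dep &P : Preds)
      if (P.overlaps(D)) {
        if (P.Latency < D.Latency) {
          P.Latency = D.Latency;                          // extend the pred copy
          for (Dep &S : D.Other->Succs)
            if (S.Other == this)
              S.Latency = std::max(S.Latency, D.Latency); // and the succ copy
        }
        return false;                                     // no new edge
      }
    Preds.push_back(D);
    D.Other->Succs.push_back({this, D.Latency});
    return true;
  }
};

int main() {
  Unit Def, Use;
  std::printf("new edge: %d\n", Use.addPred({&Def, 1})); // 1 (added)
  std::printf("new edge: %d\n", Use.addPred({&Def, 3})); // 0 (extended)
  std::printf("latency = %u\n", Use.Preds[0].Latency);   // 3
  return 0;
}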
Example 15: assert
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
const MachineOperand &MO) {
assert(MO.isDef() && "expect physreg def");
// Ask the target if address-backscheduling is desirable, and if so how much.
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
unsigned DataLatency = SU->Latency;
for (const unsigned *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
if (!Uses.contains(*Alias))
continue;
std::vector<SUnit*> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
if (UseSU == SU)
continue;
unsigned LDataLatency = DataLatency;
// Optionally add in a special extra latency for nodes that
// feed addresses.
// TODO: Perhaps we should get rid of
// SpecialAddressLatency and just move this into
// adjustSchedDependency for the targets that care about it.
if (SpecialAddressLatency != 0 && !UnitLatencies &&
UseSU != &ExitSU) {
MachineInstr *UseMI = UseSU->getInstr();
const MCInstrDesc &UseMCID = UseMI->getDesc();
int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
if (RegUseIndex >= 0 &&
(UseMI->mayLoad() || UseMI->mayStore()) &&
(unsigned)RegUseIndex < UseMCID.getNumOperands() &&
UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
LDataLatency += SpecialAddressLatency;
}
// Adjust the dependence latency using operand def/use
// information (if any), and then allow the target to
// perform its own adjustments.
const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
if (!UnitLatencies) {
ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
}
UseSU->addPred(dep);
}
}
}