This article collects and summarizes typical usage examples of the C++ method APInt::getHiBits. If you have been wondering what APInt::getHiBits does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of its containing class, APInt.
A total of 2 code examples of APInt::getHiBits are shown below, sorted by popularity by default.
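Before the examples, a quick note on semantics: in LLVM's APInt API, both getHiBits(N) and getLoBits(N) return an APInt with the same bit width as the original. getHiBits(N) logically shifts the top N bits down to bit 0, while getLoBits(N) clears everything above the low N bits. The standalone sketch below illustrates this; the main driver and the build command are our own additions, and only the APInt calls themselves come from the examples that follow.

// Minimal sketch of APInt::getHiBits / APInt::getLoBits semantics.
// Build (assumption): clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs support)
#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  llvm::APInt Imm(64, 0x1122334455667788ULL);

  // getLoBits(32): the low 32 bits survive; everything above is cleared.
  assert(Imm.getLoBits(32).getZExtValue() == 0x55667788ULL);

  // getHiBits(32): the high 32 bits are shifted down to bit 0.
  assert(Imm.getHiBits(32).getZExtValue() == 0x11223344ULL);

  // Both results keep the original 64-bit width.
  assert(Imm.getHiBits(32).getBitWidth() == 64);
  return 0;
}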
Example 1: foldOperand
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
      UseOp.isImplicit())) {
    return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
      TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI.getRegClass(DestReg) :
        TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
         RSUse = MRI.use_begin(RegSeqDstReg),
         RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {
      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes. Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
//......... the rest of this example is omitted .........
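For reference, the sub0/sub1 split in Example 1 can be reproduced in isolation: a use of the low half (AMDGPU::sub0) of a 64-bit register receives getLoBits(32) of the immediate, a use of the high half (AMDGPU::sub1) receives getHiBits(32), and the split value is then handed to MachineOperand::CreateImm via getSExtValue. A minimal sketch with a hypothetical immediate value, using only APInt and none of the AMDGPU machinery:

// Sketch of the sub0/sub1 immediate split performed by foldOperand above.
// The immediate 0xDEADBEEF00C0FFEE is hypothetical.
#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  llvm::APInt Imm(64, 0xDEADBEEF00C0FFEEULL);

  llvm::APInt Sub0 = Imm.getLoBits(32);  // folded into the sub0 use
  llvm::APInt Sub1 = Imm.getHiBits(32);  // folded into the sub1 use

  // These are the values a MachineOperand::CreateImm call would receive.
  std::printf("sub0 imm: 0x%llx\n", (unsigned long long)Sub0.getSExtValue()); // 0xc0ffee
  std::printf("sub1 imm: 0x%llx\n", (unsigned long long)Sub1.getSExtValue()); // 0xdeadbeef
  return 0;
}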
Example 2: runOnMachineFunction
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      // FIXME: Fold operands with subregs.
      if (OpToFold.isReg() &&
          (!TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()) ||
           OpToFold.getSubReg()))
        continue;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {
        MachineInstr *UseMI = Use->getParent();
        const MachineOperand &UseOp = UseMI->getOperand(Use.getOperandNo());

        // FIXME: Fold operands with subregs.
        if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
            UseOp.isImplicit())) {
          continue;
        }

        APInt Imm;

        if (FoldingImm) {
          unsigned UseReg = UseOp.getReg();
          const TargetRegisterClass *UseRC
            = TargetRegisterInfo::isVirtualRegister(UseReg) ?
            MRI.getRegClass(UseReg) :
            TRI.getPhysRegClass(UseReg);

          Imm = APInt(64, OpToFold.getImm());

          // Split 64-bit constants into 32-bits for folding.
          if (UseOp.getSubReg()) {
            if (UseRC->getSize() != 8)
              continue;

            if (UseOp.getSubReg() == AMDGPU::sub0) {
              Imm = Imm.getLoBits(32);
            } else {
              assert(UseOp.getSubReg() == AMDGPU::sub1);
              Imm = Imm.getHiBits(32);
            }
          }

          // In order to fold immediates into copies, we need to change the
          // copy to a MOV.
          if (UseMI->getOpcode() == AMDGPU::COPY) {
            unsigned DestReg = UseMI->getOperand(0).getReg();
            const TargetRegisterClass *DestRC
              = TargetRegisterInfo::isVirtualRegister(DestReg) ?
              MRI.getRegClass(DestReg) :
              TRI.getPhysRegClass(DestReg);

            unsigned MovOp = TII->getMovOpcode(DestRC);
            if (MovOp == AMDGPU::COPY)
              continue;

            UseMI->setDesc(TII->get(MovOp));
          }
        }

        const MCInstrDesc &UseDesc = UseMI->getDesc();

        // Don't fold into target independent nodes. Target independent opcodes
        // don't have defined register classes.
        if (UseDesc.isVariadic() ||
//......... the rest of this example is omitted .........