This article collects typical usage examples of the C++ method MachineBasicBlock::getBasicBlock. If you are wondering what MachineBasicBlock::getBasicBlock does, how to use it, or what real-world calls look like, the curated code samples below may help. You can also read more about the enclosing class, MachineBasicBlock.
In total, 15 code examples of MachineBasicBlock::getBasicBlock are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
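Before the examples, here is a minimal sketch (not taken from any of them; the helper name is made up) of the contract they all rely on: MachineBasicBlock::getBasicBlock() returns the IR BasicBlock the machine block was lowered from, and it may return null for blocks created during code generation without an IR counterpart, so callers check the pointer before dereferencing it.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Report which IR block, if any, an MBB was lowered from.
void reportOriginatingBlock(const MachineBasicBlock &MBB) {
  if (const BasicBlock *BB = MBB.getBasicBlock()) {
    if (BB->hasName())
      errs() << "lowered from IR block '" << BB->getName() << "'\n";
    else
      errs() << "lowered from an unnamed IR block\n";
  } else {
    errs() << "no associated IR block\n";
  }
}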
Example 1: expandToLongBranch
// Expand branch instructions to long branches.
// TODO: This function has to be fixed for beqz16 and bnez16, because it
// currently assumes that all branches have 16-bit offsets, and will produce
// wrong code if branches whose allowed offsets are [-128, -126, ..., 126]
// are present.
void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
MachineBasicBlock::iterator Pos;
MachineBasicBlock *MBB = I.Br->getParent(), *TgtMBB = getTargetMBB(*I.Br);
DebugLoc DL = I.Br->getDebugLoc();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
const MipsSubtarget &Subtarget =
static_cast<const MipsSubtarget &>(MF->getSubtarget());
const MipsInstrInfo *TII =
static_cast<const MipsInstrInfo *>(Subtarget.getInstrInfo());
MF->insert(FallThroughMBB, LongBrMBB);
MBB->replaceSuccessor(TgtMBB, LongBrMBB);
if (IsPIC) {
MachineBasicBlock *BalTgtMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(FallThroughMBB, BalTgtMBB);
LongBrMBB->addSuccessor(BalTgtMBB);
BalTgtMBB->addSuccessor(TgtMBB);
// We must select between the MIPS32r6/MIPS64r6 BALC (which is a normal
// instruction) and the pre-MIPS32r6/MIPS64r6 definition (which is a
// pseudo-instruction wrapping BGEZAL).
const unsigned BalOp =
Subtarget.hasMips32r6()
? Subtarget.inMicroMipsMode() ? Mips::BALC_MMR6 : Mips::BALC
: Mips::BAL_BR;
if (!ABI.IsN64()) {
// Pre R6:
// $longbr:
// addiu $sp, $sp, -8
// sw $ra, 0($sp)
// lui $at, %hi($tgt - $baltgt)
// bal $baltgt
// addiu $at, $at, %lo($tgt - $baltgt)
// $baltgt:
// addu $at, $ra, $at
// lw $ra, 0($sp)
// jr $at
// addiu $sp, $sp, 8
// $fallthrough:
//
// R6:
// $longbr:
// addiu $sp, $sp, -8
// sw $ra, 0($sp)
// lui $at, %hi($tgt - $baltgt)
// addiu $at, $at, %lo($tgt - $baltgt)
// balc $baltgt
// $baltgt:
// addu $at, $ra, $at
// lw $ra, 0($sp)
// addiu $sp, $sp, 8
// jic $at, 0
// $fallthrough:
Pos = LongBrMBB->begin();
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
.addReg(Mips::SP).addImm(-8);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SW)).addReg(Mips::RA)
.addReg(Mips::SP).addImm(0);
// LUi and ADDiu instructions create a 32-bit offset of the target basic
// block from the target of the BAL(C) instruction. We cannot use an immediate
// value for this offset because it cannot be determined accurately when
// the program has inline assembly statements. We therefore use the
// relocation expressions %hi($tgt-$baltgt) and %lo($tgt-$baltgt) which
// are resolved during the fixup, so the values will always be correct.
//
// Since we cannot create %hi($tgt-$baltgt) and %lo($tgt-$baltgt)
// expressions at this point (it is possible only at the MC layer),
// we replace LUi and ADDiu with pseudo instructions
// LONG_BRANCH_LUi and LONG_BRANCH_ADDiu, and add both basic
// blocks as operands to these instructions. When lowering these pseudo
// instructions to LUi and ADDiu in the MC layer, we will create
// %hi($tgt-$baltgt) and %lo($tgt-$baltgt) expressions and add them as
// operands to lowered instructions.
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_LUi), Mips::AT)
.addMBB(TgtMBB).addMBB(BalTgtMBB);
MachineInstrBuilder BalInstr =
BuildMI(*MF, DL, TII->get(BalOp)).addMBB(BalTgtMBB);
MachineInstrBuilder ADDiuInstr =
BuildMI(*MF, DL, TII->get(Mips::LONG_BRANCH_ADDiu), Mips::AT)
.addReg(Mips::AT)
.addMBB(TgtMBB)
.addMBB(BalTgtMBB);
if (Subtarget.hasMips32r6()) {
LongBrMBB->insert(Pos, ADDiuInstr);
LongBrMBB->insert(Pos, BalInstr);
//......... rest of the code omitted here .........
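The getBasicBlock-specific part of Example 1 is its very first step: the new long-branch block is created with the same IR BasicBlock as the block being expanded, so it stays associated with the original source block. A condensed sketch of just that step, assuming MF, MBB and TgtMBB are set up as above (the helper name is made up; same headers as the first sketch plus llvm/CodeGen/MachineFunction.h):

static MachineBasicBlock *createHelperBlock(MachineFunction *MF,
                                            MachineBasicBlock *MBB,
                                            MachineBasicBlock *TgtMBB) {
  // getBasicBlock() may return null; CreateMachineBasicBlock accepts null too.
  const BasicBlock *BB = MBB->getBasicBlock();
  MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
  // Place the new block immediately after MBB and route the edge to the
  // original branch target through it (this is the non-PIC shape).
  MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
  MF->insert(FallThroughMBB, LongBrMBB);
  MBB->replaceSuccessor(TgtMBB, LongBrMBB);
  LongBrMBB->addSuccessor(TgtMBB);
  return LongBrMBB;
}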
Example 2: print
void MIPrinter::print(const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
OS << "bb." << MBB.getNumber();
bool HasAttributes = false;
if (const auto *BB = MBB.getBasicBlock()) {
if (BB->hasName()) {
OS << "." << BB->getName();
} else {
HasAttributes = true;
OS << " (";
int Slot = MST.getLocalSlot(BB);
if (Slot == -1)
OS << "<ir-block badref>";
else
OS << (Twine("%ir-block.") + Twine(Slot)).str();
}
}
if (MBB.hasAddressTaken()) {
OS << (HasAttributes ? ", " : " (");
OS << "address-taken";
HasAttributes = true;
}
if (MBB.isLandingPad()) {
OS << (HasAttributes ? ", " : " (");
OS << "landing-pad";
HasAttributes = true;
}
if (MBB.getAlignment()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << MBB.getAlignment();
HasAttributes = true;
}
if (HasAttributes)
OS << ")";
OS << ":\n";
bool HasLineAttributes = false;
// Print the successors
if (!MBB.succ_empty()) {
OS.indent(2) << "successors: ";
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
printMBBReference(**I);
if (MBB.hasSuccessorWeights())
OS << '(' << MBB.getSuccWeight(I) << ')';
}
OS << "\n";
HasLineAttributes = true;
}
// Print the live in registers.
const auto *TRI = MBB.getParent()->getSubtarget().getRegisterInfo();
assert(TRI && "Expected target register info");
if (!MBB.livein_empty()) {
OS.indent(2) << "liveins: ";
for (auto I = MBB.livein_begin(), E = MBB.livein_end(); I != E; ++I) {
if (I != MBB.livein_begin())
OS << ", ";
printReg(*I, OS, TRI);
}
OS << "\n";
HasLineAttributes = true;
}
if (HasLineAttributes)
OS << "\n";
bool IsInBundle = false;
for (auto I = MBB.instr_begin(), E = MBB.instr_end(); I != E; ++I) {
const MachineInstr &MI = *I;
if (IsInBundle && !MI.isInsideBundle()) {
OS.indent(2) << "}\n";
IsInBundle = false;
}
OS.indent(IsInBundle ? 4 : 2);
print(MI);
if (!IsInBundle && MI.getFlag(MachineInstr::BundledSucc)) {
OS << " {";
IsInBundle = true;
}
OS << "\n";
}
if (IsInBundle)
OS.indent(2) << "}\n";
}
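A small companion sketch of the block-reference format this printer builds (made-up helper names, same headers as the first sketch): a block is printed as bb.<number>, and when getBasicBlock() yields a named IR block the IR name is appended, which is also handy when dumping a block's successor list.

void printMBBRef(const MachineBasicBlock &MBB, raw_ostream &OS) {
  OS << "bb." << MBB.getNumber();
  if (const BasicBlock *BB = MBB.getBasicBlock())
    if (BB->hasName())
      OS << "." << BB->getName();
}

void printSuccessorRefs(const MachineBasicBlock &MBB, raw_ostream &OS) {
  for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
    if (I != MBB.succ_begin())
      OS << ", ";
    printMBBRef(**I, OS);
  }
  OS << "\n";
}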
Example 3: analyzeBlockForNullChecks
/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
MDNode *BranchMD = nullptr;
if (auto *BB = MBB.getBasicBlock())
BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);
if (!BranchMD)
return false;
MachineBranchPredicate MBP;
if (TII->AnalyzeBranchPredicate(MBB, MBP, true))
return false;
// Is the predicate comparing an integer to zero?
if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
(MBP.Predicate == MachineBranchPredicate::PRED_NE ||
MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
return false;
// If we cannot erase the test instruction itself, then making the null check
// implicit does not buy us much.
if (!MBP.SingleUseCondition)
return false;
MachineBasicBlock *NotNullSucc, *NullSucc;
if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
NotNullSucc = MBP.TrueDest;
NullSucc = MBP.FalseDest;
} else {
NotNullSucc = MBP.FalseDest;
NullSucc = MBP.TrueDest;
}
// We handle the simplest case for now. We can potentially do better by using
// the machine dominator tree.
if (NotNullSucc->pred_size() != 1)
return false;
// Starting with a code fragment like:
//
// test %RAX, %RAX
// jne LblNotNull
//
// LblNull:
// callq throw_NullPointerException
//
// LblNotNull:
// Inst0
// Inst1
// ...
// Def = Load (%RAX + <offset>)
// ...
//
//
// we want to end up with
//
// Def = FaultingLoad (%RAX + <offset>), LblNull
// jmp LblNotNull ;; explicit or fallthrough
//
// LblNotNull:
// Inst0
// Inst1
// ...
//
// LblNull:
// callq throw_NullPointerException
//
//
// To see why this is legal, consider the two possibilities:
//
// 1. %RAX is null: since we constrain <offset> to be less than PageSize, the
// load instruction dereferences the null page, causing a segmentation
// fault.
//
// 2. %RAX is not null: in this case we know that the load cannot fault, as
// otherwise the load would've faulted in the original program too and the
// original program would've been undefined.
//
// This reasoning cannot be extended to justify hoisting through arbitrary
// control flow. For instance, in the example below (in pseudo-C)
//
// if (ptr == null) { throw_npe(); unreachable; }
// if (some_cond) { return 42; }
// v = ptr->field; // LD
// ...
//
// we cannot (without code duplication) use the load marked "LD" to null check
// ptr -- clause (2) above does not apply in this case. In the above program
// the safety of ptr->field can be dependent on some_cond; and, for instance,
// ptr could be some non-null invalid reference that never gets loaded from
// because some_cond is always true.
unsigned PointerReg = MBP.LHS.getReg();
//......... rest of the code omitted here .........
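The only getBasicBlock call in this example is the metadata probe at the top: the machine block is mapped back to its IR block, and the terminator's !make.implicit annotation is read from there. A minimal sketch of just that check (made-up helper name; assumes llvm/IR/Instruction.h and llvm/IR/LLVMContext.h in addition to the headers above):

static bool blockHasMakeImplicitMD(const MachineBasicBlock &MBB) {
  const BasicBlock *BB = MBB.getBasicBlock();
  if (!BB)
    return false; // no IR counterpart, so no metadata to honor
  // Every well-formed BasicBlock has a terminator; the metadata itself may
  // still be absent, in which case getMetadata returns null.
  return BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit) !=
         nullptr;
}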
Example 4: convertCmovInstsToBranches
void X86CmovConverterPass::convertCmovInstsToBranches(
SmallVectorImpl<MachineInstr *> &Group) const {
assert(!Group.empty() && "No CMOV instructions to convert");
++NumOfOptimizedCmovGroups;
// If the CMOV group is not packed, e.g., there are debug instructions between
// first CMOV and last CMOV, then pack the group and make the CMOV instruction
// consecutive by moving the debug instructions to after the last CMOV.
packCmovGroup(Group.front(), Group.back());
// To convert a CMOVcc instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
// to set, the condition code register to branch on, the true/false values to
// select between, and a branch opcode to use.
// Before
// -----
// MBB:
// cond = cmp ...
// v1 = CMOVge t1, f1, cond
// v2 = CMOVlt t2, f2, cond
// v3 = CMOVge v1, f3, cond
//
// After
// -----
// MBB:
// cond = cmp ...
// jge %SinkMBB
//
// FalseMBB:
// jmp %SinkMBB
//
// SinkMBB:
// %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
// %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
// ; true-value with false-value
// %v3 = phi[%f3, %FalseMBB], [%t1, %MBB] ; Phi instruction cannot use
// ; previous Phi instruction result
MachineInstr &MI = *Group.front();
MachineInstr *LastCMOV = Group.back();
DebugLoc DL = MI.getDebugLoc();
X86::CondCode CC = X86::CondCode(X86::getCondFromCMovOpc(MI.getOpcode()));
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
// Potentially swap the condition codes so that any memory operand to a CMOV
// is in the *false* position instead of the *true* position. We can invert
// any non-memory operand CMOV instructions to cope with this and we ensure
// memory operand CMOVs are only included with a single condition code.
if (llvm::any_of(Group, [&](MachineInstr *I) {
return I->mayLoad() && X86::getCondFromCMovOpc(I->getOpcode()) == CC;
}))
std::swap(CC, OppCC);
MachineBasicBlock *MBB = MI.getParent();
MachineFunction::iterator It = ++MBB->getIterator();
MachineFunction *F = MBB->getParent();
const BasicBlock *BB = MBB->getBasicBlock();
MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
F->insert(It, FalseMBB);
F->insert(It, SinkMBB);
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
if (checkEFLAGSLive(LastCMOV)) {
FalseMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
}
// Transfer the remainder of BB and its successor edges to SinkMBB.
SinkMBB->splice(SinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// Add the false and sink blocks as its successors.
MBB->addSuccessor(FalseMBB);
MBB->addSuccessor(SinkMBB);
// Create the conditional branch instruction.
BuildMI(MBB, DL, TII->get(X86::GetCondBranchFromCond(CC))).addMBB(SinkMBB);
// Add the sink block to the false block successors.
FalseMBB->addSuccessor(SinkMBB);
MachineInstrBuilder MIB;
MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
MachineBasicBlock::iterator MIItEnd =
std::next(MachineBasicBlock::iterator(LastCMOV));
MachineBasicBlock::iterator FalseInsertionPoint = FalseMBB->begin();
MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
// First we need to insert an explicit load on the false path for any memory
// operand. We also need to potentially do register rewriting here, but it is
// simpler as the memory operands are always on the false path so we can
// simply take that input, whatever it is.
DenseMap<unsigned, unsigned> FalseBBRegRewriteTable;
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
auto &MI = *MIIt++;
//......... rest of the code omitted here .........
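Stripped of the CMOV-specific work, the CFG surgery above has a reusable shape: create the new blocks with the parent block's own IR BasicBlock, splice the tail of the block into the sink, and let transferSuccessorsAndUpdatePHIs move the successor edges. A condensed sketch of that shape (made-up helper; branch insertion and PHI creation are left out):

static void buildDiamond(MachineInstr &FirstMI, MachineInstr &LastMI,
                         MachineBasicBlock *&FalseMBB,
                         MachineBasicBlock *&SinkMBB) {
  MachineBasicBlock *MBB = FirstMI.getParent();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock(); // shared IR block, may be null
  FalseMBB = MF->CreateMachineBasicBlock(BB);
  SinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineFunction::iterator It = ++MBB->getIterator();
  MF->insert(It, FalseMBB);
  MF->insert(It, SinkMBB);
  // Everything after the group moves to the sink, which also inherits MBB's
  // successors (PHIs in those successors are rewritten to name SinkMBB).
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(&LastMI)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(FalseMBB);
  MBB->addSuccessor(SinkMBB);
  FalseMBB->addSuccessor(SinkMBB);
}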
Example 5: analyzeBlockForNullChecks
/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
MDNode *BranchMD =
MBB.getBasicBlock()
? MBB.getBasicBlock()->getTerminator()->getMetadata(LLVMContext::MD_make_implicit)
: nullptr;
if (!BranchMD)
return false;
MachineBranchPredicate MBP;
if (TII->AnalyzeBranchPredicate(MBB, MBP, true))
return false;
// Is the predicate comparing an integer to zero?
if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
(MBP.Predicate == MachineBranchPredicate::PRED_NE ||
MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
return false;
// If we cannot erase the test instruction itself, then making the null check
// implicit does not buy us much.
if (!MBP.SingleUseCondition)
return false;
MachineBasicBlock *NotNullSucc, *NullSucc;
if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
NotNullSucc = MBP.TrueDest;
NullSucc = MBP.FalseDest;
} else {
NotNullSucc = MBP.FalseDest;
NullSucc = MBP.TrueDest;
}
// We handle the simplest case for now. We can potentially do better by using
// the machine dominator tree.
if (NotNullSucc->pred_size() != 1)
return false;
// Starting with a code fragment like:
//
// test %RAX, %RAX
// jne LblNotNull
//
// LblNull:
// callq throw_NullPointerException
//
// LblNotNull:
// Inst0
// Inst1
// ...
// Def = Load (%RAX + <offset>)
// ...
//
//
// we want to end up with
//
// Def = TrappingLoad (%RAX + <offset>), LblNull
// jmp LblNotNull ;; explicit or fallthrough
//
// LblNotNull:
// Inst0
// Inst1
// ...
//
// LblNull:
// callq throw_NullPointerException
//
unsigned PointerReg = MBP.LHS.getReg();
// As we scan NotNullSucc for a suitable load instruction, we keep track of
// the registers defined and used by the instructions we scan past. This bit
// of information lets us decide if it is legal to hoist the load instruction
// we find (if we do find such an instruction) to before NotNullSucc.
DenseSet<unsigned> RegDefs, RegUses;
// Returns true if it is safe to reorder MI to before NotNullSucc.
auto IsSafeToHoist = [&](MachineInstr *MI) {
// Right now we don't want to worry about LLVM's memory model. This can be
// made more precise later.
for (auto *MMO : MI->memoperands())
if (!MMO->isUnordered())
return false;
for (auto &MO : MI->operands()) {
if (MO.isReg() && MO.getReg()) {
for (unsigned Reg : RegDefs)
if (TRI->regsOverlap(Reg, MO.getReg()))
return false; // We found a write-after-write or read-after-write
if (MO.isDef())
for (unsigned Reg : RegUses)
if (TRI->regsOverlap(Reg, MO.getReg()))
//......... rest of the code omitted here .........
Example 6: fixupConditionalBranch
/// fixupConditionalBranch - Fix up a conditional branch whose destination is
/// too far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) {
DebugLoc DL = MI.getDebugLoc();
MachineBasicBlock *MBB = MI.getParent();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
bool Fail = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
assert(!Fail && "branches to be relaxed must be analyzable");
(void)Fail;
// Add an unconditional branch to the destination and invert the branch
// condition to jump over it:
// tbz L1
// =>
// tbnz L2
// b L1
// L2:
if (FBB && isBlockInRange(MI, *FBB)) {
// Last MI in the BB is an unconditional branch. We can simply invert the
// condition and swap destinations:
// beq L1
// b L2
// =>
// bne L2
// b L1
DEBUG(dbgs() << " Invert condition and swap "
"its destination with " << MBB->back());
TII->reverseBranchCondition(Cond);
int OldSize = 0, NewSize = 0;
TII->removeBranch(*MBB, &OldSize);
TII->insertBranch(*MBB, FBB, TBB, Cond, DL, &NewSize);
BlockInfo[MBB->getNumber()].Size += (NewSize - OldSize);
return true;
} else if (FBB) {
// We need to split the basic block here to obtain two long-range
// unconditional branches.
auto &NewBB = *MF->CreateMachineBasicBlock(MBB->getBasicBlock());
MF->insert(++MBB->getIterator(), &NewBB);
// Insert an entry into BlockInfo to align it properly with the block
// numbers.
BlockInfo.insert(BlockInfo.begin() + NewBB.getNumber(), BasicBlockInfo());
unsigned &NewBBSize = BlockInfo[NewBB.getNumber()].Size;
int NewBrSize;
TII->insertUnconditionalBranch(NewBB, FBB, DL, &NewBrSize);
NewBBSize += NewBrSize;
// Update the successor lists according to the transformation to follow.
// Do it here since if there's no split, no update is needed.
MBB->replaceSuccessor(FBB, &NewBB);
NewBB.addSuccessor(FBB);
}
// We now have an appropriate fall-through block in place (either naturally or
// just created), so we can invert the condition.
MachineBasicBlock &NextBB = *std::next(MachineFunction::iterator(MBB));
DEBUG(dbgs() << " Insert B to BB#" << TBB->getNumber()
<< ", invert condition and change dest. to BB#"
<< NextBB.getNumber() << '\n');
unsigned &MBBSize = BlockInfo[MBB->getNumber()].Size;
// Insert a new conditional branch and a new unconditional branch.
int RemovedSize = 0;
TII->reverseBranchCondition(Cond);
TII->removeBranch(*MBB, &RemovedSize);
MBBSize -= RemovedSize;
int AddedSize = 0;
TII->insertBranch(*MBB, &NextBB, TBB, Cond, DL, &AddedSize);
MBBSize += AddedSize;
// Finally, keep the block offsets up to date.
adjustBlockOffsets(*MBB);
return true;
}
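Two contract details this function leans on are easy to miss: analyzeBranch returns true on failure (hence the asserted !Fail above), and removeBranch/insertBranch take optional int out-parameters that report the code-size delta so the pass can keep its per-block size table accurate. A fragment restating that contract, assuming MBB, TII and DL are in scope as above and BlockSize is a hypothetical per-block byte counter:

MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
// analyzeBranch returns true when it could NOT analyze the terminators.
if (TII->analyzeBranch(*MBB, TBB, FBB, Cond))
  return false;
// Invert the condition, drop the old branches and re-emit them swapped; the
// optional out-parameters report how many bytes were removed and added.
TII->reverseBranchCondition(Cond);
int Removed = 0, Added = 0;
TII->removeBranch(*MBB, &Removed);
TII->insertBranch(*MBB, FBB, TBB, Cond, DL, &Added);
BlockSize += Added - Removed;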
Example 7: getTargetMBB
// Expand branch instructions to long branches.
// TODO: This function has to be fixed for beqz16 and bnez16, because it
// currently assumes that all branches have 16-bit offsets, and will produce
// wrong code if branches whose allowed offsets are [-128, -126, ..., 126]
// are present.
void Cpu0LongBranch::expandToLongBranch(MBBInfo &I) {
MachineBasicBlock::iterator Pos;
MachineBasicBlock *MBB = I.Br->getParent(), *TgtMBB = getTargetMBB(*I.Br);
DebugLoc DL = I.Br->getDebugLoc();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
const Cpu0Subtarget &Subtarget =
static_cast<const Cpu0Subtarget &>(MF->getSubtarget());
const Cpu0InstrInfo *TII =
static_cast<const Cpu0InstrInfo *>(Subtarget.getInstrInfo());
MF->insert(FallThroughMBB, LongBrMBB);
MBB->replaceSuccessor(TgtMBB, LongBrMBB);
if (IsPIC) {
MachineBasicBlock *BalTgtMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(FallThroughMBB, BalTgtMBB);
LongBrMBB->addSuccessor(BalTgtMBB);
BalTgtMBB->addSuccessor(TgtMBB);
unsigned BalOp = Cpu0::BAL;
// $longbr:
// addiu $sp, $sp, -8
// st $lr, 0($sp)
// lui $at, %hi($tgt - $baltgt)
// addiu $lr, $lr, %lo($tgt - $baltgt)
// bal $baltgt
// nop
// $baltgt:
// addu $at, $lr, $at
// addiu $sp, $sp, 8
// ld $lr, 0($sp)
// jr $at
// nop
// $fallthrough:
//
Pos = LongBrMBB->begin();
BuildMI(*LongBrMBB, Pos, DL, TII->get(Cpu0::ADDiu), Cpu0::SP)
.addReg(Cpu0::SP).addImm(-8);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Cpu0::ST)).addReg(Cpu0::LR)
.addReg(Cpu0::SP).addImm(0);
// LUi and ADDiu instructions create a 32-bit offset of the target basic
// block from the target of the BAL instruction. We cannot use an immediate
// value for this offset because it cannot be determined accurately when
// the program has inline assembly statements. We therefore use the
// relocation expressions %hi($tgt-$baltgt) and %lo($tgt-$baltgt) which
// are resolved during the fixup, so the values will always be correct.
//
// Since we cannot create %hi($tgt-$baltgt) and %lo($tgt-$baltgt)
// expressions at this point (it is possible only at the MC layer),
// we replace LUi and ADDiu with pseudo instructions
// LONG_BRANCH_LUi and LONG_BRANCH_ADDiu, and add both basic
// blocks as operands to these instructions. When lowering these pseudo
// instructions to LUi and ADDiu in the MC layer, we will create
// %hi($tgt-$baltgt) and %lo($tgt-$baltgt) expressions and add them as
// operands to lowered instructions.
BuildMI(*LongBrMBB, Pos, DL, TII->get(Cpu0::LONG_BRANCH_LUi), Cpu0::AT)
.addMBB(TgtMBB).addMBB(BalTgtMBB);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Cpu0::LONG_BRANCH_ADDiu), Cpu0::AT)
.addReg(Cpu0::AT).addMBB(TgtMBB).addMBB(BalTgtMBB);
MIBundleBuilder(*LongBrMBB, Pos)
.append(BuildMI(*MF, DL, TII->get(BalOp)).addMBB(BalTgtMBB));
Pos = BalTgtMBB->begin();
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Cpu0::ADDu), Cpu0::AT)
.addReg(Cpu0::LR).addReg(Cpu0::AT);
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Cpu0::LD), Cpu0::LR)
.addReg(Cpu0::SP).addImm(0);
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Cpu0::ADDiu), Cpu0::SP)
.addReg(Cpu0::SP).addImm(8);
MIBundleBuilder(*BalTgtMBB, Pos)
.append(BuildMI(*MF, DL, TII->get(Cpu0::JR)).addReg(Cpu0::AT))
.append(BuildMI(*MF, DL, TII->get(Cpu0::NOP)));
assert(LongBrMBB->size() + BalTgtMBB->size() == LongBranchSeqSize);
} else {
// $longbr:
// jmp $tgt
// nop
// $fallthrough:
//
Pos = LongBrMBB->begin();
LongBrMBB->addSuccessor(TgtMBB);
MIBundleBuilder(*LongBrMBB, Pos)
.append(BuildMI(*MF, DL, TII->get(Cpu0::JMP)).addMBB(TgtMBB))
.append(BuildMI(*MF, DL, TII->get(Cpu0::NOP)));
//......... rest of the code omitted here .........
Example 8: analyzeBlockForNullChecks
/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
MDNode *BranchMD = nullptr;
if (auto *BB = MBB.getBasicBlock())
BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);
if (!BranchMD)
return false;
MachineBranchPredicate MBP;
if (TII->analyzeBranchPredicate(MBB, MBP, true))
return false;
// Is the predicate comparing an integer to zero?
if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
(MBP.Predicate == MachineBranchPredicate::PRED_NE ||
MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
return false;
// If we cannot erase the test instruction itself, then making the null check
// implicit does not buy us much.
if (!MBP.SingleUseCondition)
return false;
MachineBasicBlock *NotNullSucc, *NullSucc;
if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
NotNullSucc = MBP.TrueDest;
NullSucc = MBP.FalseDest;
} else {
NotNullSucc = MBP.FalseDest;
NullSucc = MBP.TrueDest;
}
// We handle the simplest case for now. We can potentially do better by using
// the machine dominator tree.
if (NotNullSucc->pred_size() != 1)
return false;
// Starting with a code fragment like:
//
// test %RAX, %RAX
// jne LblNotNull
//
// LblNull:
// callq throw_NullPointerException
//
// LblNotNull:
// Inst0
// Inst1
// ...
// Def = Load (%RAX + <offset>)
// ...
//
//
// we want to end up with
//
// Def = FaultingLoad (%RAX + <offset>), LblNull
// jmp LblNotNull ;; explicit or fallthrough
//
// LblNotNull:
// Inst0
// Inst1
// ...
//
// LblNull:
// callq throw_NullPointerException
//
//
// To see why this is legal, consider the two possibilities:
//
// 1. %RAX is null: since we constrain <offset> to be less than PageSize, the
// load instruction dereferences the null page, causing a segmentation
// fault.
//
// 2. %RAX is not null: in this case we know that the load cannot fault, as
// otherwise the load would've faulted in the original program too and the
// original program would've been undefined.
//
// This reasoning cannot be extended to justify hoisting through arbitrary
// control flow. For instance, in the example below (in pseudo-C)
//
// if (ptr == null) { throw_npe(); unreachable; }
// if (some_cond) { return 42; }
// v = ptr->field; // LD
// ...
//
// we cannot (without code duplication) use the load marked "LD" to null check
// ptr -- clause (2) above does not apply in this case. In the above program
// the safety of ptr->field can be dependent on some_cond; and, for instance,
// ptr could be some non-null invalid reference that never gets loaded from
// because some_cond is always true.
const unsigned PointerReg = MBP.LHS.getReg();
//.........这里部分代码省略.........
Example 9: runOnMachineFunction
bool FPRegKiller::runOnMachineFunction(MachineFunction &MF) {
// If we are emitting FP stack code, scan the basic block to determine if this
// block defines any FP values. If so, put an FP_REG_KILL instruction before
// the terminator of the block.
// Note that FP stack instructions are used in all modes for long double,
// so we always need to do this check.
// Also note that it's possible for an FP stack register to be live across
// an instruction that produces multiple basic blocks (SSE CMOV) so we
// must check all the generated basic blocks.
// Scan all of the machine instructions in these MBBs, checking for FP
// stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
// Fast-path: If nothing is using the x87 registers, we don't need to do
// any scanning.
MachineRegisterInfo &MRI = MF.getRegInfo();
if (MRI.getRegClassVirtRegs(X86::RFP80RegisterClass).empty() &&
MRI.getRegClassVirtRegs(X86::RFP64RegisterClass).empty() &&
MRI.getRegClassVirtRegs(X86::RFP32RegisterClass).empty())
return false;
bool Changed = false;
const X86Subtarget &Subtarget = MF.getTarget().getSubtarget<X86Subtarget>();
MachineFunction::iterator MBBI = MF.begin();
MachineFunction::iterator EndMBB = MF.end();
for (; MBBI != EndMBB; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
// If this block returns, ignore it. We don't want to insert an FP_REG_KILL
// before the return.
if (!MBB->empty()) {
MachineBasicBlock::iterator EndI = MBB->end();
--EndI;
if (EndI->getDesc().isReturn())
continue;
}
bool ContainsFPCode = false;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
!ContainsFPCode && I != E; ++I) {
if (I->getNumOperands() != 0 && I->getOperand(0).isReg()) {
const TargetRegisterClass *clas;
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
if (I->getOperand(op).isReg() && I->getOperand(op).isDef() &&
TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
((clas = MRI.getRegClass(I->getOperand(op).getReg())) ==
X86::RFP32RegisterClass ||
clas == X86::RFP64RegisterClass ||
clas == X86::RFP80RegisterClass)) {
ContainsFPCode = true;
break;
}
}
}
}
// Check PHI nodes in successor blocks. These PHI's will be lowered to have
// a copy of the input value in this block. In SSE mode, we only care about
// 80-bit values.
if (!ContainsFPCode) {
// Final check, check LLVM BB's that are successors to the LLVM BB
// corresponding to BB for FP PHI nodes.
const BasicBlock *LLVMBB = MBB->getBasicBlock();
const PHINode *PN;
for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
!ContainsFPCode && SI != E; ++SI) {
for (BasicBlock::const_iterator II = SI->begin();
(PN = dyn_cast<PHINode>(II)); ++II) {
if (PN->getType()==Type::getX86_FP80Ty(LLVMBB->getContext()) ||
(!Subtarget.hasSSE1() && PN->getType()->isFloatingPoint()) ||
(!Subtarget.hasSSE2() &&
PN->getType()==Type::getDoubleTy(LLVMBB->getContext()))) {
ContainsFPCode = true;
break;
}
}
}
}
// Finally, if we found any FP code, emit the FP_REG_KILL instruction.
if (ContainsFPCode) {
BuildMI(*MBB, MBBI->getFirstTerminator(), DebugLoc::getUnknownLoc(),
MF.getTarget().getInstrInfo()->get(X86::FP_REG_KILL));
++NumFPKill;
Changed = true;
}
}
return Changed;
}
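The getBasicBlock-specific part of this pass is the final check: because PHI lowering materializes copies in the current block, the pass walks the IR-level successors of MBB->getBasicBlock() and inspects their PHI nodes. A condensed sketch of that traversal, written against a somewhat newer LLVM API than the example (the successors() and phis() range helpers from llvm/IR/CFG.h and llvm/IR/BasicBlock.h), so treat it as an illustration rather than a drop-in:

static bool irSuccessorsHaveFPPHIs(const MachineBasicBlock *MBB) {
  const BasicBlock *LLVMBB = MBB->getBasicBlock();
  if (!LLVMBB)
    return false; // no IR counterpart, nothing to inspect
  for (const BasicBlock *Succ : successors(LLVMBB))
    for (const PHINode &PN : Succ->phis())
      if (PN.getType()->isFloatingPointTy())
        return true; // a successor PHI produces a floating-point value
  return false;
}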
Example 10: expandAtomicBinOp
bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI,
unsigned Size) {
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC, ZERO, BEQ;
if (Size == 4) {
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
BEQ = Mips::BEQ;
}
ZERO = Mips::ZERO;
} else {
LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
ZERO = Mips::ZERO_64;
BEQ = Mips::BEQ64;
}
unsigned OldVal = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned Incr = I->getOperand(2).getReg();
unsigned Scratch = I->getOperand(3).getReg();
unsigned Opcode = 0;
unsigned OR = 0;
unsigned AND = 0;
unsigned NOR = 0;
bool IsNand = false;
switch (I->getOpcode()) {
case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
Opcode = Mips::ADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
Opcode = Mips::SUBu;
break;
case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
Opcode = Mips::AND;
break;
case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
Opcode = Mips::OR;
break;
case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
Opcode = Mips::XOR;
break;
case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
IsNand = true;
AND = Mips::AND;
NOR = Mips::NOR;
break;
case Mips::ATOMIC_SWAP_I32_POSTRA:
OR = Mips::OR;
break;
case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
Opcode = Mips::DADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
Opcode = Mips::DSUBu;
break;
case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
Opcode = Mips::AND64;
break;
case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
Opcode = Mips::OR64;
break;
case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
Opcode = Mips::XOR64;
break;
case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
IsNand = true;
AND = Mips::AND64;
NOR = Mips::NOR64;
break;
case Mips::ATOMIC_SWAP_I64_POSTRA:
OR = Mips::OR64;
break;
default:
llvm_unreachable("Unknown pseudo atomic!");
}
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loopMBB);
//......... rest of the code omitted here .........
Example 11: expandAtomicCmpSwapSubword
bool MipsExpandPseudo::expandAtomicCmpSwapSubword(
MachineBasicBlock &BB, MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI) {
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC;
unsigned ZERO = Mips::ZERO;
unsigned BNE = Mips::BNE;
unsigned BEQ = Mips::BEQ;
unsigned SEOp =
I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I8_POSTRA ? Mips::SEB : Mips::SEH;
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
}
unsigned Dest = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned Mask = I->getOperand(2).getReg();
unsigned ShiftCmpVal = I->getOperand(3).getReg();
unsigned Mask2 = I->getOperand(4).getReg();
unsigned ShiftNewVal = I->getOperand(5).getReg();
unsigned ShiftAmnt = I->getOperand(6).getReg();
unsigned Scratch = I->getOperand(7).getReg();
unsigned Scratch2 = I->getOperand(8).getReg();
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loop1MBB);
MF->insert(It, loop2MBB);
MF->insert(It, sinkMBB);
MF->insert(It, exitMBB);
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), &BB,
std::next(MachineBasicBlock::iterator(I)), BB.end());
exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
// thisMBB:
// ...
// fallthrough --> loop1MBB
BB.addSuccessor(loop1MBB, BranchProbability::getOne());
loop1MBB->addSuccessor(sinkMBB);
loop1MBB->addSuccessor(loop2MBB);
loop1MBB->normalizeSuccProbs();
loop2MBB->addSuccessor(loop1MBB);
loop2MBB->addSuccessor(sinkMBB);
loop2MBB->normalizeSuccProbs();
sinkMBB->addSuccessor(exitMBB, BranchProbability::getOne());
// loop1MBB:
// ll dest, 0(ptr)
// and Mask', dest, Mask
// bne Mask', ShiftCmpVal, exitMBB
BuildMI(loop1MBB, DL, TII->get(LL), Scratch).addReg(Ptr).addImm(0);
BuildMI(loop1MBB, DL, TII->get(Mips::AND), Scratch2)
.addReg(Scratch)
.addReg(Mask);
BuildMI(loop1MBB, DL, TII->get(BNE))
.addReg(Scratch2).addReg(ShiftCmpVal).addMBB(sinkMBB);
// loop2MBB:
// and dest, dest, mask2
// or dest, dest, ShiftNewVal
// sc dest, dest, 0(ptr)
// beq dest, $0, loop1MBB
BuildMI(loop2MBB, DL, TII->get(Mips::AND), Scratch)
.addReg(Scratch, RegState::Kill)
.addReg(Mask2);
BuildMI(loop2MBB, DL, TII->get(Mips::OR), Scratch)
.addReg(Scratch, RegState::Kill)
.addReg(ShiftNewVal);
BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
.addReg(Scratch, RegState::Kill)
.addReg(Ptr)
.addImm(0);
BuildMI(loop2MBB, DL, TII->get(BEQ))
.addReg(Scratch, RegState::Kill)
.addReg(ZERO)
.addMBB(loop1MBB);
// sinkMBB:
// srl srlres, Mask', shiftamt
//......... rest of the code omitted here .........
Example 12: expandAtomicBinOpSubword
bool MipsExpandPseudo::expandAtomicBinOpSubword(
MachineBasicBlock &BB, MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI) {
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC;
unsigned BEQ = Mips::BEQ;
unsigned SEOp = Mips::SEH;
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
}
bool IsSwap = false;
bool IsNand = false;
unsigned Opcode = 0;
switch (I->getOpcode()) {
case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
IsNand = true;
break;
case Mips::ATOMIC_SWAP_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_SWAP_I16_POSTRA:
IsSwap = true;
break;
case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
Opcode = Mips::ADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
Opcode = Mips::SUBu;
break;
case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
Opcode = Mips::AND;
break;
case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
Opcode = Mips::OR;
break;
case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
SEOp = Mips::SEB;
LLVM_FALLTHROUGH;
case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
Opcode = Mips::XOR;
break;
default:
llvm_unreachable("Unknown subword atomic pseudo for expansion!");
}
unsigned Dest = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned Incr = I->getOperand(2).getReg();
unsigned Mask = I->getOperand(3).getReg();
unsigned Mask2 = I->getOperand(4).getReg();
unsigned ShiftAmnt = I->getOperand(5).getReg();
unsigned OldVal = I->getOperand(6).getReg();
unsigned BinOpRes = I->getOperand(7).getReg();
unsigned StoreVal = I->getOperand(8).getReg();
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loopMBB);
MF->insert(It, sinkMBB);
MF->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end());
exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
BB.addSuccessor(loopMBB, BranchProbability::getOne());
loopMBB->addSuccessor(sinkMBB);
loopMBB->addSuccessor(loopMBB);
//......... rest of the code omitted here .........
Example 13: expandAtomicCmpSwap
bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI) {
const unsigned Size =
I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I32_POSTRA ? 4 : 8;
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC, ZERO, BNE, BEQ, MOVE;
if (Size == 4) {
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6()
? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
BNE = Mips::BNE;
BEQ = Mips::BEQ;
}
ZERO = Mips::ZERO;
MOVE = Mips::OR;
} else {
LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
ZERO = Mips::ZERO_64;
BNE = Mips::BNE64;
BEQ = Mips::BEQ64;
MOVE = Mips::OR64;
}
unsigned Dest = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned OldVal = I->getOperand(2).getReg();
unsigned NewVal = I->getOperand(3).getReg();
unsigned Scratch = I->getOperand(4).getReg();
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loop1MBB);
MF->insert(It, loop2MBB);
MF->insert(It, exitMBB);
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), &BB,
std::next(MachineBasicBlock::iterator(I)), BB.end());
exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
// thisMBB:
// ...
// fallthrough --> loop1MBB
BB.addSuccessor(loop1MBB, BranchProbability::getOne());
loop1MBB->addSuccessor(exitMBB);
loop1MBB->addSuccessor(loop2MBB);
loop1MBB->normalizeSuccProbs();
loop2MBB->addSuccessor(loop1MBB);
loop2MBB->addSuccessor(exitMBB);
loop2MBB->normalizeSuccProbs();
// loop1MBB:
// ll dest, 0(ptr)
// bne dest, oldval, exitMBB
BuildMI(loop1MBB, DL, TII->get(LL), Dest).addReg(Ptr).addImm(0);
BuildMI(loop1MBB, DL, TII->get(BNE))
.addReg(Dest, RegState::Kill).addReg(OldVal).addMBB(exitMBB);
// loop2MBB:
// move scratch, NewVal
// sc Scratch, Scratch, 0(ptr)
// beq Scratch, $0, loop1MBB
BuildMI(loop2MBB, DL, TII->get(MOVE), Scratch).addReg(NewVal).addReg(ZERO);
BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
.addReg(Scratch).addReg(Ptr).addImm(0);
BuildMI(loop2MBB, DL, TII->get(BEQ))
.addReg(Scratch, RegState::Kill).addReg(ZERO).addMBB(loop1MBB);
LivePhysRegs LiveRegs;
computeAndAddLiveIns(LiveRegs, *loop1MBB);
computeAndAddLiveIns(LiveRegs, *loop2MBB);
computeAndAddLiveIns(LiveRegs, *exitMBB);
NMBBI = BB.end();
I->eraseFromParent();
return true;
}
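Examples 10 through 13 share the same scaffolding around getBasicBlock: every new block of the expansion is created with the original block's IR BasicBlock, the tail of the block is spliced into the exit block, and liveness is recomputed for the new blocks afterwards. A condensed sketch of that scaffolding (made-up helper; computeAndAddLiveIns comes from llvm/CodeGen/LivePhysRegs.h):

static void makeExpansionBlocks(MachineBasicBlock &BB,
                                MachineBasicBlock::iterator I,
                                MachineBasicBlock *&LoopMBB,
                                MachineBasicBlock *&ExitMBB) {
  MachineFunction *MF = BB.getParent();
  const BasicBlock *LLVM_BB = BB.getBasicBlock(); // may be null
  LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  ExitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB.getIterator();
  MF->insert(It, LoopMBB);
  MF->insert(It, ExitMBB);
  // Everything after the pseudo being expanded continues in ExitMBB, which
  // also takes over BB's successor edges.
  ExitMBB->splice(ExitMBB->begin(), &BB, std::next(I), BB.end());
  ExitMBB->transferSuccessorsAndUpdatePHIs(&BB);
  BB.addSuccessor(LoopMBB, BranchProbability::getOne());
  LoopMBB->addSuccessor(ExitMBB);
  LoopMBB->addSuccessor(LoopMBB); // the ll/sc retry edge
  // Once the expansion code has been emitted into LoopMBB, recompute liveness:
  //   LivePhysRegs LiveRegs;
  //   computeAndAddLiveIns(LiveRegs, *LoopMBB);
  //   computeAndAddLiveIns(LiveRegs, *ExitMBB);
}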
Example 14: convertToHardwareLoop
/// convertToHardwareLoop - check if the loop is a candidate for
/// converting to a hardware loop. If so, then perform the
/// transformation.
///
/// This function works on innermost loops first. A loop can
/// be converted if it is a counting loop whose trip count is either a register
/// value or an immediate.
///
/// The code makes several assumptions about the representation
/// of the loop in llvm.
bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
bool Changed = false;
// Process nested loops first.
for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
Changed |= convertToHardwareLoop(*I);
}
// If a nested loop has been converted, then we can't convert this loop.
if (Changed) {
return Changed;
}
// Are we able to determine the trip count for the loop?
CountValue *TripCount = getTripCount(L);
if (TripCount == 0) {
return false;
}
// Does the loop contain any invalid instructions?
if (containsInvalidInstruction(L)) {
return false;
}
MachineBasicBlock *Preheader = L->getLoopPreheader();
// No preheader means there's no place for the loop instruction.
if (Preheader == 0) {
return false;
}
MachineBasicBlock::iterator InsertPos = Preheader->getFirstTerminator();
MachineBasicBlock *LastMBB = L->getExitingBlock();
// Don't generate hw loop if the loop has more than one exit.
if (LastMBB == 0) {
return false;
}
MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
// Determine the loop start.
MachineBasicBlock *LoopStart = L->getTopBlock();
if (L->getLoopLatch() != LastMBB) {
// When the exit and latch are not the same, use the latch block as the
// start.
// The loop start address is used only after the 1st iteration, and the loop
// latch may contain instrs. that need to be executed after the 1st iter.
LoopStart = L->getLoopLatch();
// Make sure the latch is a successor of the exit, otherwise it won't work.
if (!LastMBB->isSuccessor(LoopStart)) {
return false;
}
}
// Convert the loop to a hardware loop
DEBUG(dbgs() << "Change to hardware loop at "; L->dump());
if (TripCount->isReg()) {
// Create a copy of the loop count register.
MachineFunction *MF = LastMBB->getParent();
const TargetRegisterClass *RC =
MF->getRegInfo().getRegClass(TripCount->getReg());
unsigned CountReg = MF->getRegInfo().createVirtualRegister(RC);
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(TargetOpcode::COPY), CountReg).addReg(TripCount->getReg());
if (TripCount->isNeg()) {
unsigned CountReg1 = CountReg;
CountReg = MF->getRegInfo().createVirtualRegister(RC);
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::NEG), CountReg).addReg(CountReg1);
}
// Add the Loop instruction to the beginning of the loop.
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::LOOP0_r)).addMBB(LoopStart).addReg(CountReg);
} else {
assert(TripCount->isImm() && "Expecting immediate value for trip count");
// Add the Loop immediate instruction to the beginning of the loop.
int64_t CountImm = TripCount->getImm();
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::LOOP0_i)).addMBB(LoopStart).addImm(CountImm);
}
// Make sure the loop start always has a reference in the CFG. We need to
// create a BlockAddress operand to get this mechanism to work; both the
// MachineBasicBlock and BasicBlock objects need the flag set.
LoopStart->setHasAddressTaken();
// This line is needed to set the hasAddressTaken flag on the BasicBlock
// object
BlockAddress::get(const_cast<BasicBlock *>(LoopStart->getBasicBlock()));
// Replace the loop branch with an endloop instruction.
DebugLoc dl = LastI->getDebugLoc();
BuildMI(*LastMBB, LastI, dl, TII->get(Hexagon::ENDLOOP0)).addMBB(LoopStart);
// The loop ends with either:
// - a conditional branch followed by an unconditional branch, or
//......... rest of the code omitted here .........
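The most unusual getBasicBlock use in this set is the address-taken handling above: setHasAddressTaken() marks the MachineBasicBlock, and creating a BlockAddress for the underlying IR block (obtained through getBasicBlock(), with the const_cast the API forces) marks the BasicBlock, so the loop-start label survives later CFG cleanups. A minimal sketch of just those two steps (made-up helper name; BlockAddress lives in llvm/IR/Constants.h):

static void keepBlockAddressAlive(MachineBasicBlock *LoopStart) {
  // Mark the MIR block...
  LoopStart->setHasAddressTaken();
  // ...and, if it has an IR counterpart, mark the IR block as well by
  // materializing a BlockAddress constant for it.
  if (const BasicBlock *BB = LoopStart->getBasicBlock())
    BlockAddress::get(const_cast<BasicBlock *>(BB));
}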
Example 15: splitMBB
/// Splits a MachineBasicBlock to branch before \p SplitBefore. The original
/// branch is \p OrigBranch. The target of the new branch can either be the same
/// as the target of the original branch or the fallthrough successor of the
/// original block as determined by \p BranchToFallThrough. The branch
/// conditions will be inverted according to \p InvertNewBranch and
/// \p InvertOrigBranch. If an instruction that previously fed the branch is to
/// be deleted, it is provided in \p MIToDelete and \p NewCond will be used as
/// the branch condition. The branch probabilities will be set if the
/// MachineBranchProbabilityInfo isn't null.
static bool splitMBB(BlockSplitInfo &BSI) {
assert(BSI.allInstrsInSameMBB() &&
"All instructions must be in the same block.");
MachineBasicBlock *ThisMBB = BSI.OrigBranch->getParent();
MachineFunction *MF = ThisMBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
assert(MRI->isSSA() && "Can only do this while the function is in SSA form.");
if (ThisMBB->succ_size() != 2) {
LLVM_DEBUG(
dbgs() << "Don't know how to handle blocks that don't have exactly"
<< " two succesors.\n");
return false;
}
const PPCInstrInfo *TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo();
unsigned OrigBROpcode = BSI.OrigBranch->getOpcode();
unsigned InvertedOpcode =
OrigBROpcode == PPC::BC
? PPC::BCn
: OrigBROpcode == PPC::BCn
? PPC::BC
: OrigBROpcode == PPC::BCLR ? PPC::BCLRn : PPC::BCLR;
unsigned NewBROpcode = BSI.InvertNewBranch ? InvertedOpcode : OrigBROpcode;
MachineBasicBlock *OrigTarget = BSI.OrigBranch->getOperand(1).getMBB();
MachineBasicBlock *OrigFallThrough = OrigTarget == *ThisMBB->succ_begin()
? *ThisMBB->succ_rbegin()
: *ThisMBB->succ_begin();
MachineBasicBlock *NewBRTarget =
BSI.BranchToFallThrough ? OrigFallThrough : OrigTarget;
BranchProbability ProbToNewTarget =
!BSI.MBPI ? BranchProbability::getUnknown()
: BSI.MBPI->getEdgeProbability(ThisMBB, NewBRTarget);
// Create a new basic block.
MachineBasicBlock::iterator InsertPoint = BSI.SplitBefore;
const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
MachineFunction::iterator It = ThisMBB->getIterator();
MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MF->insert(++It, NewMBB);
// Move everything after SplitBefore into the new block.
NewMBB->splice(NewMBB->end(), ThisMBB, InsertPoint, ThisMBB->end());
NewMBB->transferSuccessors(ThisMBB);
// Add the two successors to ThisMBB. The probabilities come from the
// existing blocks if available.
ThisMBB->addSuccessor(NewBRTarget, ProbToNewTarget);
ThisMBB->addSuccessor(NewMBB, ProbToNewTarget.getCompl());
// Add the branches to ThisMBB.
BuildMI(*ThisMBB, ThisMBB->end(), BSI.SplitBefore->getDebugLoc(),
TII->get(NewBROpcode))
.addReg(BSI.SplitCond->getOperand(0).getReg())
.addMBB(NewBRTarget);
BuildMI(*ThisMBB, ThisMBB->end(), BSI.SplitBefore->getDebugLoc(),
TII->get(PPC::B))
.addMBB(NewMBB);
if (BSI.MIToDelete)
BSI.MIToDelete->eraseFromParent();
// Change the condition on the original branch and invert it if requested.
auto FirstTerminator = NewMBB->getFirstTerminator();
if (BSI.NewCond) {
assert(FirstTerminator->getOperand(0).isReg() &&
"Can't update condition of unconditional branch.");
FirstTerminator->getOperand(0).setReg(BSI.NewCond->getOperand(0).getReg());
}
if (BSI.InvertOrigBranch)
FirstTerminator->setDesc(TII->get(InvertedOpcode));
// If any of the PHIs in the successors of NewMBB reference values that
// now come from NewMBB, they need to be updated.
for (auto *Succ : NewMBB->successors()) {
updatePHIs(Succ, ThisMBB, NewMBB, MRI);
}
addIncomingValuesToPHIs(NewBRTarget, ThisMBB, NewMBB, MRI);
LLVM_DEBUG(dbgs() << "After splitting, ThisMBB:\n"; ThisMBB->dump());
LLVM_DEBUG(dbgs() << "NewMBB:\n"; NewMBB->dump());
LLVM_DEBUG(dbgs() << "New branch-to block:\n"; NewBRTarget->dump());
return true;
}