This article collects typical usage examples of the C++ method LLT::isVector. If you have been wondering what LLT::isVector does, how to call it, or what real-world uses look like, the hand-picked examples below should help; you can also explore the enclosing LLT class for further context.
In total, 15 code examples of LLT::isVector are shown below, ordered by popularity by default.
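Before the examples, here is a minimal standalone sketch of what LLT::isVector distinguishes. The factory functions used (LLT::scalar, LLT::vector, LLT::pointer) are real LLT entry points, but their exact signatures and header location have shifted across LLVM versions, so treat this as an assumption-laden illustration rather than version-exact code.
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cassert>
using namespace llvm;

void isVectorDemo() {
  LLT S32 = LLT::scalar(32);      // plain 32-bit scalar: s32
  LLT V4S32 = LLT::vector(4, 32); // vector of four 32-bit elements: <4 x s32>
  LLT P0 = LLT::pointer(0, 64);   // 64-bit pointer in address space 0

  // isVector() is true only for the vector type; scalars and pointers
  // report false. Most examples below use exactly this test to route
  // vector values to FPRs or to pick a legalization action.
  assert(!S32.isVector());
  assert(V4S32.isVector());
  assert(!P0.isVector());
}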
Example 1: mutationIsSane
// Make sure the returned mutation makes sense for the match type.
static bool mutationIsSane(const LegalizeRule &Rule,
const LegalityQuery &Q,
std::pair<unsigned, LLT> Mutation) {
const unsigned TypeIdx = Mutation.first;
const LLT OldTy = Q.Types[TypeIdx];
const LLT NewTy = Mutation.second;
switch (Rule.getAction()) {
case FewerElements:
case MoreElements: {
if (!OldTy.isVector())
return false;
if (NewTy.isVector()) {
if (Rule.getAction() == FewerElements) {
// Make sure the element count really decreased.
if (NewTy.getNumElements() >= OldTy.getNumElements())
return false;
} else {
// Make sure the element count really increased.
if (NewTy.getNumElements() <= OldTy.getNumElements())
return false;
}
}
// Make sure the element type didn't change.
return NewTy.getScalarType() == OldTy.getElementType();
}
case NarrowScalar:
case WidenScalar: {
if (OldTy.isVector()) {
// Number of elements should not change.
if (!NewTy.isVector() || OldTy.getNumElements() != NewTy.getNumElements())
return false;
} else {
// Both types must be scalars.
if (NewTy.isVector())
return false;
}
if (Rule.getAction() == NarrowScalar) {
// Make sure the size really decreased.
if (NewTy.getScalarSizeInBits() >= OldTy.getScalarSizeInBits())
return false;
} else {
// Make sure the size really increased.
if (NewTy.getScalarSizeInBits() <= OldTy.getScalarSizeInBits())
return false;
}
return true;
}
default:
return true;
}
}
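Concretely, with OldTy = <4 x s32>, the checks above classify candidate mutations like this (a hand-worked illustration, not from the original page):
// FewerElements -> <2 x s32> : sane (element count shrank, element type kept)
// FewerElements -> s32       : sane (full scalarization; NewTy is not a vector)
// MoreElements  -> <8 x s32> : sane (element count grew, element type kept)
// WidenScalar   -> <4 x s64> : sane (element count preserved, scalar widened)
// WidenScalar   -> <2 x s64> : rejected (element count changed)
// NarrowScalar  -> <4 x s32> : rejected (scalar size did not actually shrink)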
Example 2: isSmallOddVector
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
return [=](const LegalityQuery &Query) {
const LLT Ty = Query.Types[TypeIdx];
return Ty.isVector() &&
Ty.getNumElements() % 2 != 0 &&
Ty.getElementType().getSizeInBits() < 32;
};
}
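A predicate like this is meant to be handed to the legalizer's rule builder. The sketch below is hypothetical glue, not code from the page: fewerElementsIf and LegalizeMutations::scalarize are real LegalizeRuleSet/LegalizeMutations entry points in later LLVM, but the opcode and rule chosen here are purely illustrative.
// Inside a target's LegalizerInfo constructor (hypothetical rule):
// scalarize small odd vectors instead of trying to keep them as vectors.
getActionDefinitionsBuilder(TargetOpcode::G_AND)
    .fewerElementsIf(isSmallOddVector(0), LegalizeMutations::scalarize(0));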
Example 3: getInstructionMapping
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned NumOperands = MI.getNumOperands();
assert(NumOperands <= 3 &&
"This code is for instructions with 3 or less operands");
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
unsigned Size = Ty.getSizeInBits();
bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;
#ifndef NDEBUG
// Make sure all the operands are using similar size and type.
// Should probably be checked by the machine verifier.
// This code won't catch cases where the number of lanes is
// different between the operands.
// If we want to go to that level of detail, it is probably
// best to check that the types are the same, period.
// Currently, we just check that the register banks are the same
// for each type.
for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
assert(
AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
RBIdx, OpTy.getSizeInBits()) ==
AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
"Operand has incompatible size");
bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
(void)OpIsFPR;
assert(IsFPR == OpIsFPR && "Operand has incompatible type");
}
#endif // End NDEBUG.
return getInstructionMapping(DefaultMappingID, 1,
getValueMapping(RBIdx, Size), NumOperands);
}
Example 4: validateTruncExt
void MachineIRBuilder::validateTruncExt(unsigned Dst, unsigned Src,
bool IsExtend) {
#ifndef NDEBUG
LLT SrcTy = MRI->getType(Src);
LLT DstTy = MRI->getType(Dst);
if (DstTy.isVector()) {
assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
"different number of elements in a trunc/ext");
} else
assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
if (IsExtend)
assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
"invalid narrowing extend");
else
assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
"invalid widening trunc");
#endif
}
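validateTruncExt is invoked from the builder's cast helpers, so its asserts fire when the cast instruction is built. A hedged usage sketch (register and type names are hypothetical):
// Reg64 has LLT s64, Reg32 has LLT s32 (hypothetical virtual registers).
MIRBuilder.buildSExt(Reg64, Reg32);   // OK: widening extend (64 > 32)
MIRBuilder.buildTrunc(Reg32, Reg64);  // OK: narrowing trunc (32 < 64)
// buildSExt(Reg32, Reg64) would trip the "invalid narrowing extend" assert.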
Example 5: computeTables
void MachineLegalizer::computeTables() {
for (auto &Op : Actions) {
LLT Ty = Op.first.second;
if (!Ty.isVector())
continue;
auto &Entry =
MaxLegalVectorElts[std::make_pair(Op.first.first, Ty.getElementType())];
Entry = std::max(Entry, Ty.getNumElements());
}
TablesInitialized = true;
}
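For intuition, a worked sketch of what this table ends up holding (setAction is shown with the old InstrAspect-style API; the values are purely illustrative):
// Suppose the target marked these vector types Legal for G_ADD:
//   setAction({G_ADD, LLT::vector(2, 32)}, Legal);
//   setAction({G_ADD, LLT::vector(4, 32)}, Legal);
// computeTables() then records MaxLegalVectorElts[{G_ADD, s32}] == 4,
// which getAction() later uses to clamp over-wide vectors (FewerElements)
// or widen under-sized ones (MoreElements).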
Example 6: make_pair
// FIXME: inefficient implementation for now. Without ComputeValueVTs we're
// probably going to need specialized lookup structures for various types before
// we have any hope of doing well with something like <13 x i3>. Even the common
// cases should do better than what we have now.
std::pair<MachineLegalizer::LegalizeAction, LLT>
MachineLegalizer::getAction(const InstrAspect &Aspect) const {
assert(TablesInitialized && "backend forgot to call computeTables");
// These *have* to be implemented for now, they're the fundamental basis of
// how everything else is transformed.
// FIXME: the long-term plan calls for expansion in terms of load/store (if
// they're not legal).
if (Aspect.Opcode == TargetOpcode::G_SEQUENCE ||
Aspect.Opcode == TargetOpcode::G_EXTRACT)
return std::make_pair(Legal, Aspect.Type);
LegalizeAction Action = findInActions(Aspect);
if (Action != NotFound)
return findLegalAction(Aspect, Action);
unsigned Opcode = Aspect.Opcode;
LLT Ty = Aspect.Type;
if (!Ty.isVector()) {
auto DefaultAction = DefaultActions.find(Aspect.Opcode);
if (DefaultAction != DefaultActions.end() && DefaultAction->second == Legal)
return std::make_pair(Legal, Ty);
assert(DefaultAction->second == NarrowScalar && "unexpected default");
return findLegalAction(Aspect, NarrowScalar);
}
LLT EltTy = Ty.getElementType();
int NumElts = Ty.getNumElements();
auto ScalarAction = ScalarInVectorActions.find(std::make_pair(Opcode, EltTy));
if (ScalarAction != ScalarInVectorActions.end() &&
ScalarAction->second != Legal)
return findLegalAction(Aspect, ScalarAction->second);
// The element type is legal in principle, but the number of elements is
// wrong.
auto MaxLegalElts = MaxLegalVectorElts.lookup(std::make_pair(Opcode, EltTy));
if (MaxLegalElts > NumElts)
return findLegalAction(Aspect, MoreElements);
if (MaxLegalElts == 0) {
// Scalarize if there's no legal vector type, which is just a special case
// of FewerElements.
return std::make_pair(FewerElements, EltTy);
}
return findLegalAction(Aspect, FewerElements);
}
Example 7: computeTables
void LegalizerInfo::computeTables() {
for (unsigned Opcode = 0; Opcode <= LastOp - FirstOp; ++Opcode) {
for (unsigned Idx = 0; Idx != Actions[Opcode].size(); ++Idx) {
for (auto &Action : Actions[Opcode][Idx]) {
LLT Ty = Action.first;
if (!Ty.isVector())
continue;
auto &Entry = MaxLegalVectorElts[std::make_pair(Opcode + FirstOp,
Ty.getElementType())];
Entry = std::max(Entry, Ty.getNumElements());
}
}
}
TablesInitialized = true;
}
Example 8: getInstrMappingImpl
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
// As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
// won't work for normal floating-point types (or NZCV). When such
// instructions exist we'll need to look at the MI's opcode.
LLT Ty = MI.getType();
unsigned BankID;
if (Ty.isVector())
BankID = AArch64::FPRRegBankID;
else
BankID = AArch64::GPRRegBankID;
Mapping = InstructionMapping{1, 1, MI.getNumOperands()};
int Size = Ty.isSized() ? Ty.getSizeInBits() : 0;
for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
Mapping.setOperandMapping(Idx, Size, getRegBank(BankID));
return Mapping;
}
Example 9: legalizeAddrSpaceCast
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder) const {
MachineFunction &MF = MIRBuilder.getMF();
MIRBuilder.setInstr(MI);
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);
unsigned DestAS = DstTy.getAddressSpace();
unsigned SrcAS = SrcTy.getAddressSpace();
// TODO: Avoid reloading from the queue ptr for each cast, or at least each
// vector element.
assert(!DstTy.isVector());
const AMDGPUTargetMachine &TM
= static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
return true;
}
if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS);
unsigned NullVal = TM.getNullPointerValue(DestAS);
auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);
unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy);
// Extract low 32-bits of the pointer.
MIRBuilder.buildExtract(PtrLo32, Src, 0);
unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));
MI.eraseFromParent();
return true;
}
assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
auto SegmentNull =
MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
auto FlatNull =
MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);
unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));
unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy);
// Coerce the type of the low half of the result so we can use merge_values.
unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
.addDef(SrcAsInt)
.addUse(Src);
// TODO: Should we allow mismatched types but matching sizes in merges to
// avoid the ptrtoint?
MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));
MI.eraseFromParent();
return true;
}
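In outline, the flat-to-LOCAL/PRIVATE branch above emits this generic MIR shape (register names invented for illustration):
// lo32 = G_EXTRACT src, 0                 ; low 32 bits of the flat pointer
// cmp  = G_ICMP ne, src, flat-null        ; is the source a real pointer?
// dst  = G_SELECT cmp, lo32, segment-null ; else produce the segment null value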
Example 10: numElementsNotEven
static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
return [=](const LegalityQuery &Query) {
const LLT QueryTy = Query.Types[TypeIdx];
return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
};
}
Example 11: vectorWiderThan
static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
return [=](const LegalityQuery &Query) {
const LLT QueryTy = Query.Types[TypeIdx];
return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
};
}
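Predicates such as numElementsNotEven and vectorWiderThan are likewise consumed by rule-builder clauses. A hypothetical combined use follows; the mutations named are real LegalizeMutations helpers, but the opcode and thresholds are illustrative.
getActionDefinitionsBuilder(TargetOpcode::G_FADD)
    .moreElementsIf(numElementsNotEven(0),
                    LegalizeMutations::moreElementsToNextPow2(0))
    .fewerElementsIf(vectorWiderThan(0, 128),
                     LegalizeMutations::changeTo(0, LLT::vector(4, 32)));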
Example 12: getInstrMappingImpl
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
RegisterBankInfo::InstructionMapping Mapping =
InstructionMapping{DefaultMappingID, 1, MI.getNumOperands()};
// Track the size and bank of each register. We don't do partial mappings.
SmallVector<unsigned, 4> OpBaseIdx(MI.getNumOperands());
SmallVector<unsigned, 4> OpFinalIdx(MI.getNumOperands());
for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
auto &MO = MI.getOperand(Idx);
if (!MO.isReg())
continue;
LLT Ty = MRI.getType(MO.getReg());
unsigned RBIdx = AArch64::getRegBankBaseIdx(Ty.getSizeInBits());
OpBaseIdx[Idx] = RBIdx;
// As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
// For floating-point instructions, scalars go in FPRs.
if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc)) {
assert(RBIdx < (AArch64::LastFPR - AArch64::FirstFPR) + 1 &&
"Index out of bound");
OpFinalIdx[Idx] = AArch64::FirstFPR + RBIdx;
} else {
assert(RBIdx < (AArch64::LastGPR - AArch64::FirstGPR) + 1 &&
"Index out of bound");
OpFinalIdx[Idx] = AArch64::FirstGPR + RBIdx;
}
}
// Some of the floating-point instructions have mixed GPR and FPR operands:
// fine-tune the computed mapping.
switch (Opc) {
case TargetOpcode::G_SITOFP:
case TargetOpcode::G_UITOFP: {
OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstFPR,
OpBaseIdx[1] + AArch64::FirstGPR};
break;
}
case TargetOpcode::G_FPTOSI:
case TargetOpcode::G_FPTOUI: {
OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstGPR,
OpBaseIdx[1] + AArch64::FirstFPR};
break;
}
case TargetOpcode::G_FCMP: {
OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstGPR, /* Predicate */ 0,
OpBaseIdx[2] + AArch64::FirstFPR,
OpBaseIdx[3] + AArch64::FirstFPR};
break;
}
}
// Finally construct the computed mapping.
for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
if (MI.getOperand(Idx).isReg())
Mapping.setOperandMapping(
Idx, ValueMapping{&AArch64::PartMappings[OpFinalIdx[Idx]], 1});
return Mapping;
}
Example 13: make_pair
// FIXME: inefficient implementation for now. Without ComputeValueVTs we're
// probably going to need specialized lookup structures for various types before
// we have any hope of doing well with something like <13 x i3>. Even the common
// cases should do better than what we have now.
std::pair<LegalizerInfo::LegalizeAction, LLT>
LegalizerInfo::getAction(const InstrAspect &Aspect) const {
assert(TablesInitialized && "backend forgot to call computeTables");
// These *have* to be implemented for now, they're the fundamental basis of
// how everything else is transformed.
// FIXME: the long-term plan calls for expansion in terms of load/store (if
// they're not legal).
if (Aspect.Opcode == TargetOpcode::G_MERGE_VALUES ||
Aspect.Opcode == TargetOpcode::G_UNMERGE_VALUES)
return std::make_pair(Legal, Aspect.Type);
LLT Ty = Aspect.Type;
LegalizeAction Action = findInActions(Aspect);
// LegalizerHelper is not able to handle non-power-of-2 types right now, so do
// not try to legalize them unless they are marked as Legal or Custom.
// FIXME: This is a temporary hack until the general non-power-of-2
// legalization works.
if (!isPowerOf2_64(Ty.getSizeInBits()) &&
!(Action == Legal || Action == Custom))
return std::make_pair(Unsupported, LLT());
if (Action != NotFound)
return findLegalAction(Aspect, Action);
unsigned Opcode = Aspect.Opcode;
if (!Ty.isVector()) {
auto DefaultAction = DefaultActions.find(Aspect.Opcode);
if (DefaultAction != DefaultActions.end() && DefaultAction->second == Legal)
return std::make_pair(Legal, Ty);
if (DefaultAction != DefaultActions.end() && DefaultAction->second == Lower)
return std::make_pair(Lower, Ty);
if (DefaultAction == DefaultActions.end() ||
DefaultAction->second != NarrowScalar)
return std::make_pair(Unsupported, LLT());
return findLegalAction(Aspect, NarrowScalar);
}
LLT EltTy = Ty.getElementType();
int NumElts = Ty.getNumElements();
auto ScalarAction = ScalarInVectorActions.find(std::make_pair(Opcode, EltTy));
if (ScalarAction != ScalarInVectorActions.end() &&
ScalarAction->second != Legal)
return findLegalAction(Aspect, ScalarAction->second);
// The element type is legal in principle, but the number of elements is
// wrong.
auto MaxLegalElts = MaxLegalVectorElts.lookup(std::make_pair(Opcode, EltTy));
if (MaxLegalElts > NumElts)
return findLegalAction(Aspect, MoreElements);
if (MaxLegalElts == 0) {
// Scalarize if there's no legal vector type, which is just a special case
// of FewerElements.
return std::make_pair(FewerElements, EltTy);
}
return findLegalAction(Aspect, FewerElements);
}
Example 14: getSameKindOfOperandsMapping
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
switch (Opc) {
// G_{F|S|U}REM are not listed because they are not legal.
// Arithmetic ops.
case TargetOpcode::G_ADD:
case TargetOpcode::G_SUB:
case TargetOpcode::G_GEP:
case TargetOpcode::G_MUL:
case TargetOpcode::G_SDIV:
case TargetOpcode::G_UDIV:
// Bitwise ops.
case TargetOpcode::G_AND:
case TargetOpcode::G_OR:
case TargetOpcode::G_XOR:
// Shifts.
case TargetOpcode::G_SHL:
case TargetOpcode::G_LSHR:
case TargetOpcode::G_ASHR:
// Floating point ops.
case TargetOpcode::G_FADD:
case TargetOpcode::G_FSUB:
case TargetOpcode::G_FMUL:
case TargetOpcode::G_FDIV:
return getSameKindOfOperandsMapping(MI);
case TargetOpcode::G_BITCAST: {
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
unsigned Size = DstTy.getSizeInBits();
bool DstIsGPR = !DstTy.isVector();
bool SrcIsGPR = !SrcTy.isVector();
const RegisterBank &DstRB =
DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
const RegisterBank &SrcRB =
SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
return InstructionMapping{
DefaultMappingID, copyCost(DstRB, SrcRB, Size),
getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
/*NumOperands*/ 2};
}
case TargetOpcode::G_SEQUENCE:
// FIXME: support this, but the generic code is really not going to do
// anything sane.
return InstructionMapping();
default:
break;
}
unsigned NumOperands = MI.getNumOperands();
// Track the size and bank of each register. We don't do partial mappings.
SmallVector<unsigned, 4> OpSize(NumOperands);
SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
auto &MO = MI.getOperand(Idx);
if (!MO.isReg() || !MO.getReg())
continue;
LLT Ty = MRI.getType(MO.getReg());
OpSize[Idx] = Ty.getSizeInBits();
// As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
// For floating-point instructions, scalars go in FPRs.
if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc))
OpRegBankIdx[Idx] = PMI_FirstFPR;
else
OpRegBankIdx[Idx] = PMI_FirstGPR;
}
unsigned Cost = 1;
// Some of the floating-point instructions have mixed GPR and FPR operands:
// fine-tune the computed mapping.
switch (Opc) {
case TargetOpcode::G_SITOFP:
case TargetOpcode::G_UITOFP:
OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
break;
case TargetOpcode::G_FPTOSI:
case TargetOpcode::G_FPTOUI:
OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
break;
case TargetOpcode::G_FCMP:
OpRegBankIdx = {PMI_FirstGPR,
/* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
break;
case TargetOpcode::G_BITCAST:
// This is going to be a cross register bank copy and this is expensive.
if (OpRegBankIdx[0] != OpRegBankIdx[1])
//......... the rest of this example is omitted .........
Example 15: getSameKindOfOperandsMapping
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc) ||
Opc == TargetOpcode::G_PHI) {
const RegisterBankInfo::InstructionMapping &Mapping =
getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
switch (Opc) {
// G_{F|S|U}REM are not listed because they are not legal.
// Arithmetic ops.
case TargetOpcode::G_ADD:
case TargetOpcode::G_SUB:
case TargetOpcode::G_GEP:
case TargetOpcode::G_MUL:
case TargetOpcode::G_SDIV:
case TargetOpcode::G_UDIV:
// Bitwise ops.
case TargetOpcode::G_AND:
case TargetOpcode::G_OR:
case TargetOpcode::G_XOR:
// Shifts.
case TargetOpcode::G_SHL:
case TargetOpcode::G_LSHR:
case TargetOpcode::G_ASHR:
// Floating point ops.
case TargetOpcode::G_FADD:
case TargetOpcode::G_FSUB:
case TargetOpcode::G_FMUL:
case TargetOpcode::G_FDIV:
return getSameKindOfOperandsMapping(MI);
case TargetOpcode::G_BITCAST: {
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
unsigned Size = DstTy.getSizeInBits();
bool DstIsGPR = !DstTy.isVector();
bool SrcIsGPR = !SrcTy.isVector();
const RegisterBank &DstRB =
DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
const RegisterBank &SrcRB =
SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
return getInstructionMapping(
DefaultMappingID, copyCost(DstRB, SrcRB, Size),
getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
/*NumOperands*/ 2);
}
default:
break;
}
unsigned NumOperands = MI.getNumOperands();
// Track the size and bank of each register. We don't do partial mappings.
SmallVector<unsigned, 4> OpSize(NumOperands);
SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
auto &MO = MI.getOperand(Idx);
if (!MO.isReg() || !MO.getReg())
continue;
LLT Ty = MRI.getType(MO.getReg());
OpSize[Idx] = Ty.getSizeInBits();
// As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
// For floating-point instructions, scalars go in FPRs.
if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc) ||
Ty.getSizeInBits() > 64)
OpRegBankIdx[Idx] = PMI_FirstFPR;
else
OpRegBankIdx[Idx] = PMI_FirstGPR;
}
unsigned Cost = 1;
// Some of the floating-point instructions have mixed GPR and FPR operands:
// fine-tune the computed mapping.
switch (Opc) {
case TargetOpcode::G_SITOFP:
case TargetOpcode::G_UITOFP:
OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
break;
case TargetOpcode::G_FPTOSI:
case TargetOpcode::G_FPTOUI:
OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
break;
case TargetOpcode::G_FCMP:
OpRegBankIdx = {PMI_FirstGPR,
/* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
break;
case TargetOpcode::G_BITCAST:
// This is going to be a cross register bank copy and this is expensive.
if (OpRegBankIdx[0] != OpRegBankIdx[1])
Cost = copyCost(
//......... the rest of this example is omitted .........