Diffstat (limited to 'contrib/llvm/utils/TableGen')
-rw-r--r--  contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp | 129
-rw-r--r--  contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp | 59
-rw-r--r--  contrib/llvm/utils/TableGen/Attributes.cpp | 4
-rw-r--r--  contrib/llvm/utils/TableGen/CallingConvEmitter.cpp | 4
-rw-r--r--  contrib/llvm/utils/TableGen/CodeEmitterGen.cpp | 30
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp | 145
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h | 27
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenInstruction.cpp | 1
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenInstruction.h | 2
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenIntrinsics.h | 7
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenMapTable.cpp | 16
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenRegisters.cpp | 116
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenRegisters.h | 25
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenSchedule.cpp | 9
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenTarget.cpp | 62
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenTarget.h | 2
-rw-r--r--  contrib/llvm/utils/TableGen/DAGISelMatcher.h | 70
-rw-r--r--  contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp | 150
-rw-r--r--  contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp | 8
-rw-r--r--  contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp | 24
-rw-r--r--  contrib/llvm/utils/TableGen/FastISelEmitter.cpp | 29
-rw-r--r--  contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp | 19
-rw-r--r--  contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp | 2270
-rw-r--r--  contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp | 12
-rw-r--r--  contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp | 83
-rw-r--r--  contrib/llvm/utils/TableGen/OptParserEmitter.cpp | 44
-rw-r--r--  contrib/llvm/utils/TableGen/RegisterBankEmitter.cpp | 320
-rw-r--r--  contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp | 27
-rw-r--r--  contrib/llvm/utils/TableGen/SearchableTableEmitter.cpp | 14
-rw-r--r--  contrib/llvm/utils/TableGen/SubtargetEmitter.cpp | 30
-rw-r--r--  contrib/llvm/utils/TableGen/SubtargetFeatureInfo.cpp | 68
-rw-r--r--  contrib/llvm/utils/TableGen/SubtargetFeatureInfo.h | 60
-rw-r--r--  contrib/llvm/utils/TableGen/TableGen.cpp | 17
-rw-r--r--  contrib/llvm/utils/TableGen/TableGenBackends.h | 2
-rw-r--r--  contrib/llvm/utils/TableGen/Types.cpp | 1
-rw-r--r--  contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp | 6
-rw-r--r--  contrib/llvm/utils/TableGen/X86DisassemblerTables.h | 2
-rw-r--r--  contrib/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp | 339
-rw-r--r--  contrib/llvm/utils/TableGen/X86ModRMFilters.h | 2
-rw-r--r--  contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp | 362
-rw-r--r--  contrib/llvm/utils/TableGen/X86RecognizableInstr.h | 128
41 files changed, 3874 insertions, 851 deletions
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 1272d2b..1f8e1b1 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -123,9 +123,12 @@ using namespace llvm;
#define DEBUG_TYPE "asm-matcher-emitter"
+cl::OptionCategory AsmMatcherEmitterCat("Options for -gen-asm-matcher");
+
static cl::opt<std::string>
-MatchPrefix("match-prefix", cl::init(""),
- cl::desc("Only match instructions with the given prefix"));
+ MatchPrefix("match-prefix", cl::init(""),
+ cl::desc("Only match instructions with the given prefix"),
+ cl::cat(AsmMatcherEmitterCat));
namespace {
class AsmMatcherInfo;
@@ -351,11 +354,11 @@ public:
class AsmVariantInfo {
public:
- std::string RegisterPrefix;
- std::string TokenizingCharacters;
- std::string SeparatorCharacters;
- std::string BreakCharacters;
- std::string Name;
+ StringRef RegisterPrefix;
+ StringRef TokenizingCharacters;
+ StringRef SeparatorCharacters;
+ StringRef BreakCharacters;
+ StringRef Name;
int AsmVariantNo;
};
@@ -760,7 +763,8 @@ public:
} // end anonymous namespace
-void MatchableInfo::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void MatchableInfo::dump() const {
errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
@@ -769,6 +773,7 @@ void MatchableInfo::dump() const {
errs() << '\"' << Op.Token << "\"\n";
}
}
+#endif
static std::pair<StringRef, StringRef>
parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
@@ -1433,8 +1438,8 @@ void AsmMatcherInfo::buildInfo() {
unsigned VariantCount = Target.getAsmParserVariantCount();
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
- std::string CommentDelimiter =
- AsmVariant->getValueAsString("CommentDelimiter");
+ StringRef CommentDelimiter =
+ AsmVariant->getValueAsString("CommentDelimiter");
AsmVariantInfo Variant;
Variant.RegisterPrefix = AsmVariant->getValueAsString("RegisterPrefix");
Variant.TokenizingCharacters =
@@ -1458,7 +1463,7 @@ void AsmMatcherInfo::buildInfo() {
continue;
// Ignore instructions for different instructions
- const std::string V = CGI->TheDef->getValueAsString("AsmVariantName");
+ StringRef V = CGI->TheDef->getValueAsString("AsmVariantName");
if (!V.empty() && V != Variant.Name)
continue;
@@ -1490,7 +1495,7 @@ void AsmMatcherInfo::buildInfo() {
.startswith( MatchPrefix))
continue;
- const std::string V = Alias->TheDef->getValueAsString("AsmVariantName");
+ StringRef V = Alias->TheDef->getValueAsString("AsmVariantName");
if (!V.empty() && V != Variant.Name)
continue;
@@ -1559,8 +1564,8 @@ void AsmMatcherInfo::buildInfo() {
// If the instruction has a two-operand alias, build up the
// matchable here. We'll add them in bulk at the end to avoid
// confusing this loop.
- std::string Constraint =
- II->TheDef->getValueAsString("TwoOperandAliasConstraint");
+ StringRef Constraint =
+ II->TheDef->getValueAsString("TwoOperandAliasConstraint");
if (Constraint != "") {
// Start by making a copy of the original matchable.
auto AliasII = llvm::make_unique<MatchableInfo>(*II);
@@ -1893,10 +1898,10 @@ static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
for (auto &II : Infos) {
// Check if we have a custom match function.
- std::string AsmMatchConverter =
- II->getResultInst()->TheDef->getValueAsString("AsmMatchConverter");
+ StringRef AsmMatchConverter =
+ II->getResultInst()->TheDef->getValueAsString("AsmMatchConverter");
if (!AsmMatchConverter.empty() && II->UseInstAsmMatchConverter) {
- std::string Signature = "ConvertCustom_" + AsmMatchConverter;
+ std::string Signature = ("ConvertCustom_" + AsmMatchConverter).str();
II->ConversionFnKind = Signature;
// Check if we have already generated this signature.
@@ -2217,7 +2222,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info,
OS << " switch (Operand.getReg()) {\n";
OS << " default: OpKind = InvalidMatchClass; break;\n";
for (const auto &RC : Info.RegisterClasses)
- OS << " case " << Info.Target.getName() << "::"
+ OS << " case " << RC.first->getValueAsString("Namespace") << "::"
<< RC.first->getName() << ": OpKind = " << RC.second->Name
<< "; break;\n";
OS << " }\n";
@@ -2438,7 +2443,7 @@ static void emitMnemonicAliasVariant(raw_ostream &OS,const AsmMatcherInfo &Info,
for (Record *R : Aliases) {
// FIXME: Allow AssemblerVariantName to be a comma separated list.
- std::string AsmVariantName = R->getValueAsString("AsmVariantName");
+ StringRef AsmVariantName = R->getValueAsString("AsmVariantName");
if (AsmVariantName != AsmParserVariantName)
continue;
AliasesFromMnemonic[R->getValueAsString("FromMnemonic")].push_back(R);
@@ -2481,14 +2486,18 @@ static void emitMnemonicAliasVariant(raw_ostream &OS,const AsmMatcherInfo &Info,
if (!MatchCode.empty())
MatchCode += "else ";
MatchCode += "if ((Features & " + FeatureMask + ") == "+FeatureMask+")\n";
- MatchCode += " Mnemonic = \"" +R->getValueAsString("ToMnemonic")+"\";\n";
+ MatchCode += " Mnemonic = \"";
+ MatchCode += R->getValueAsString("ToMnemonic");
+ MatchCode += "\";\n";
}
if (AliasWithNoPredicate != -1) {
Record *R = ToVec[AliasWithNoPredicate];
if (!MatchCode.empty())
MatchCode += "else\n ";
- MatchCode += "Mnemonic = \"" + R->getValueAsString("ToMnemonic")+"\";\n";
+ MatchCode += "Mnemonic = \"";
+ MatchCode += R->getValueAsString("ToMnemonic");
+ MatchCode += "\";\n";
}
MatchCode += "return;";
@@ -2517,7 +2526,7 @@ static bool emitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info,
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
int AsmParserVariantNo = AsmVariant->getValueAsInt("Variant");
- std::string AsmParserVariantName = AsmVariant->getValueAsString("Name");
+ StringRef AsmParserVariantName = AsmVariant->getValueAsString("Name");
OS << " case " << AsmParserVariantNo << ":\n";
emitMnemonicAliasVariant(OS, Info, Aliases, /*Indent=*/2,
AsmParserVariantName);
@@ -2702,10 +2711,51 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
OS << "}\n\n";
}
+static void emitMnemonicSpellChecker(raw_ostream &OS, CodeGenTarget &Target,
+ unsigned VariantCount) {
+ OS << "std::string " << Target.getName() << "MnemonicSpellCheck(StringRef S, uint64_t FBS) {\n";
+ if (!VariantCount)
+ OS << " return \"\";";
+ else {
+ OS << " const unsigned MaxEditDist = 2;\n";
+ OS << " std::vector<StringRef> Candidates;\n";
+ OS << " StringRef Prev = \"\";\n";
+ OS << " auto End = std::end(MatchTable0);\n";
+ OS << "\n";
+ OS << " for (auto I = std::begin(MatchTable0); I < End; I++) {\n";
+ OS << " // Ignore unsupported instructions.\n";
+ OS << " if ((FBS & I->RequiredFeatures) != I->RequiredFeatures)\n";
+ OS << " continue;\n";
+ OS << "\n";
+ OS << " StringRef T = I->getMnemonic();\n";
+ OS << " // Avoid recomputing the edit distance for the same string.\n";
+ OS << " if (T.equals(Prev))\n";
+ OS << " continue;\n";
+ OS << "\n";
+ OS << " Prev = T;\n";
+ OS << " unsigned Dist = S.edit_distance(T, false, MaxEditDist);\n";
+ OS << " if (Dist <= MaxEditDist)\n";
+ OS << " Candidates.push_back(T);\n";
+ OS << " }\n";
+ OS << "\n";
+ OS << " if (Candidates.empty())\n";
+ OS << " return \"\";\n";
+ OS << "\n";
+ OS << " std::string Res = \", did you mean: \";\n";
+ OS << " unsigned i = 0;\n";
+ OS << " for( ; i < Candidates.size() - 1; i++)\n";
+ OS << " Res += Candidates[i].str() + \", \";\n";
+ OS << " return Res + Candidates[i].str() + \"?\";\n";
+ }
+ OS << "}\n";
+ OS << "\n";
+}
+
+
void AsmMatcherEmitter::run(raw_ostream &OS) {
CodeGenTarget Target(Records);
Record *AsmParser = Target.getAsmParser();
- std::string ClassName = AsmParser->getValueAsString("AsmParserClassName");
+ StringRef ClassName = AsmParser->getValueAsString("AsmParserClassName");
// Compute the information on the instructions to match.
AsmMatcherInfo Info(AsmParser, Target, Records);
@@ -2784,8 +2834,6 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
}
OS << " void convertToMapAndConstraints(unsigned Kind,\n ";
OS << " const OperandVector &Operands) override;\n";
- if (HasMnemonicFirst)
- OS << " bool mnemonicIsValid(StringRef Mnemonic, unsigned VariantID);\n";
OS << " unsigned MatchInstructionImpl(const OperandVector &Operands,\n"
<< " MCInst &Inst,\n"
<< " uint64_t &ErrorInfo,"
@@ -2860,7 +2908,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
emitValidateOperandClass(Info, OS);
// Emit the available features compute function.
- SubtargetFeatureInfo::emitComputeAvailableFeatures(
+ SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
Info.Target.getName(), ClassName, "ComputeAvailableFeatures",
Info.SubtargetFeatures, OS);
@@ -2883,7 +2931,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
StringTable.EmitString(OS);
OS << ";\n\n";
- // Emit the static match table; unused classes get initalized to 0 which is
+ // Emit the static match table; unused classes get initialized to 0 which is
// guaranteed to be InvalidMatchClass.
//
// FIXME: We can reduce the size of this table very easily. First, we change
@@ -2941,7 +2989,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
std::string LenMnemonic = char(MI->Mnemonic.size()) + MI->Mnemonic.str();
OS << " { " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
<< " /* " << MI->Mnemonic << " */, "
- << Target.getName() << "::"
+ << Target.getInstNamespace() << "::"
<< MI->getResultInst()->TheDef->getName() << ", "
<< MI->ConversionFnKind << ", ";
@@ -2967,27 +3015,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << "};\n\n";
}
- // A method to determine if a mnemonic is in the list.
- if (HasMnemonicFirst) {
- OS << "bool " << Target.getName() << ClassName << "::\n"
- << "mnemonicIsValid(StringRef Mnemonic, unsigned VariantID) {\n";
- OS << " // Find the appropriate table for this asm variant.\n";
- OS << " const MatchEntry *Start, *End;\n";
- OS << " switch (VariantID) {\n";
- OS << " default: llvm_unreachable(\"invalid variant!\");\n";
- for (unsigned VC = 0; VC != VariantCount; ++VC) {
- Record *AsmVariant = Target.getAsmParserVariant(VC);
- int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
- OS << " case " << AsmVariantNo << ": Start = std::begin(MatchTable" << VC
- << "); End = std::end(MatchTable" << VC << "); break;\n";
- }
- OS << " }\n";
- OS << " // Search the table.\n";
- OS << " auto MnemonicRange = ";
- OS << "std::equal_range(Start, End, Mnemonic, LessOpcode());\n";
- OS << " return MnemonicRange.first != MnemonicRange.second;\n";
- OS << "}\n\n";
- }
+ emitMnemonicSpellChecker(OS, Target, VariantCount);
// Finally, build the match function.
OS << "unsigned " << Target.getName() << ClassName << "::\n"
@@ -3192,8 +3220,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
<< " }\n\n";
// Call the post-processing function, if used.
- std::string InsnCleanupFn =
- AsmParser->getValueAsString("AsmParserInstCleanup");
+ StringRef InsnCleanupFn = AsmParser->getValueAsString("AsmParserInstCleanup");
if (!InsnCleanupFn.empty())
OS << " " << InsnCleanupFn << "(Inst);\n";
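Editorial note: the emitMnemonicSpellChecker routine added above generates a <Target>MnemonicSpellCheck helper that walks the match table and suggests mnemonics within edit distance 2 of an unrecognized one. The standalone sketch below mirrors that suggestion logic without any LLVM dependencies; editDistance, mnemonicSpellCheck and the hard-coded mnemonic list are illustrative assumptions, not generated code.

// Sketch of what the generated MnemonicSpellCheck function does: collect known
// mnemonics within a small edit distance of the unknown one and format a
// ", did you mean: ..." hint.
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Plain Levenshtein distance; LLVM's StringRef::edit_distance plays this role.
static unsigned editDistance(const std::string &A, const std::string &B) {
  std::vector<unsigned> Prev(B.size() + 1), Cur(B.size() + 1);
  for (unsigned j = 0; j <= B.size(); ++j) Prev[j] = j;
  for (unsigned i = 1; i <= A.size(); ++i) {
    Cur[0] = i;
    for (unsigned j = 1; j <= B.size(); ++j)
      Cur[j] = std::min({Prev[j] + 1, Cur[j - 1] + 1,
                         Prev[j - 1] + (A[i - 1] != B[j - 1])});
    std::swap(Prev, Cur);
  }
  return Prev[B.size()];
}

std::string mnemonicSpellCheck(const std::string &S,
                               const std::vector<std::string> &Table) {
  const unsigned MaxEditDist = 2; // same bound as the generated code
  std::vector<std::string> Candidates;
  for (const std::string &T : Table)
    if (editDistance(S, T) <= MaxEditDist)
      Candidates.push_back(T);
  if (Candidates.empty())
    return "";
  std::string Res = ", did you mean: ";
  for (size_t i = 0; i + 1 < Candidates.size(); ++i)
    Res += Candidates[i] + ", ";
  return Res + Candidates.back() + "?";
}

int main() {
  // Hypothetical mnemonic list; the real one comes from MatchTable0.
  std::vector<std::string> Mnemonics = {"add", "adc", "and", "sub", "mul"};
  std::cout << "unknown mnemonic 'ad'" << mnemonicSpellCheck("ad", Mnemonics)
            << "\n"; // prints: unknown mnemonic 'ad', did you mean: add, adc, and?
}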
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
index a7c6104..75b9bc6 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -137,12 +137,12 @@ static void EmitInstructions(std::vector<AsmWriterInst> &Insts,
O << " switch (MI->getOpcode()) {\n";
O << " default: llvm_unreachable(\"Unexpected opcode.\");\n";
std::vector<std::pair<std::string, AsmWriterOperand>> OpsToPrint;
- OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace + "::" +
+ OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace.str() + "::" +
FirstInst.CGI->TheDef->getName().str(),
FirstInst.Operands[i]));
for (const AsmWriterInst &AWI : SimilarInsts) {
- OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace+"::" +
+ OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace.str()+"::" +
AWI.CGI->TheDef->getName().str(),
AWI.Operands[i]));
}
@@ -272,7 +272,7 @@ static void UnescapeString(std::string &Str) {
/// clearing the Instructions vector.
void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
Record *AsmWriter = Target.getAsmWriter();
- std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+ StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
bool PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
O <<
@@ -523,7 +523,7 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
// If the register has an alternate name for this index, use it.
// Otherwise, leave it empty as an error flag.
if (Idx < e) {
- std::vector<std::string> AltNames =
+ std::vector<StringRef> AltNames =
Reg.TheDef->getValueAsListOfStrings("AltNames");
if (AltNames.size() <= Idx)
PrintFatalError(Reg.TheDef->getLoc(),
@@ -553,12 +553,11 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
Record *AsmWriter = Target.getAsmWriter();
- std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+ StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
const auto &Registers = Target.getRegBank().getRegisters();
const std::vector<Record*> &AltNameIndices = Target.getRegAltNameIndices();
bool hasAltNames = AltNameIndices.size() > 1;
- std::string Namespace =
- Registers.front().TheDef->getValueAsString("Namespace");
+ StringRef Namespace = Registers.front().TheDef->getValueAsString("Namespace");
O <<
"\n\n/// getRegisterName - This method is automatically generated by tblgen\n"
@@ -583,14 +582,16 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
O << " switch(AltIdx) {\n"
<< " default: llvm_unreachable(\"Invalid register alt name index!\");\n";
for (const Record *R : AltNameIndices) {
- const std::string &AltName = R->getName();
- std::string Prefix = !Namespace.empty() ? Namespace + "::" : "";
- O << " case " << Prefix << AltName << ":\n"
- << " assert(*(AsmStrs" << AltName << "+RegAsmOffset"
- << AltName << "[RegNo-1]) &&\n"
+ StringRef AltName = R->getName();
+ O << " case ";
+ if (!Namespace.empty())
+ O << Namespace << "::";
+ O << AltName << ":\n"
+ << " assert(*(AsmStrs" << AltName << "+RegAsmOffset" << AltName
+ << "[RegNo-1]) &&\n"
<< " \"Invalid alt name index for register!\");\n"
- << " return AsmStrs" << AltName << "+RegAsmOffset"
- << AltName << "[RegNo-1];\n";
+ << " return AsmStrs" << AltName << "+RegAsmOffset" << AltName
+ << "[RegNo-1];\n";
}
O << " }\n";
} else {
@@ -741,7 +742,7 @@ struct AliasPriorityComparator {
if (LHS.second == RHS.second) {
// We don't actually care about the order, but for consistency it
// shouldn't depend on pointer comparisons.
- return LHS.first.TheDef->getName() < RHS.first.TheDef->getName();
+ return LessRecordByID()(LHS.first.TheDef, RHS.first.TheDef);
}
// Aliases with larger priorities should be considered first.
@@ -762,7 +763,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
//////////////////////////////
// Emit the method that prints the alias instruction.
- std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+ StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
unsigned Variant = AsmWriter->getValueAsInt("Variant");
bool PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
@@ -807,16 +808,15 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
IAPrinter IAP(CGA.Result->getAsString(), CGA.AsmString);
- std::string Namespace = Target.getName();
+ StringRef Namespace = Target.getName();
std::vector<Record *> ReqFeatures;
if (PassSubtarget) {
// We only consider ReqFeatures predicates if PassSubtarget
std::vector<Record *> RF =
CGA.TheDef->getValueAsListOfDefs("Predicates");
- std::copy_if(RF.begin(), RF.end(), std::back_inserter(ReqFeatures),
- [](Record *R) {
- return R->getValueAsBit("AssemblerMatcherPredicate");
- });
+ copy_if(RF, std::back_inserter(ReqFeatures), [](Record *R) {
+ return R->getValueAsBit("AssemblerMatcherPredicate");
+ });
}
unsigned NumMIOps = 0;
@@ -846,7 +846,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
// code to use.
if (Rec->isSubClassOf("RegisterOperand") ||
Rec->isSubClassOf("Operand")) {
- std::string PrintMethod = Rec->getValueAsString("PrintMethod");
+ StringRef PrintMethod = Rec->getValueAsString("PrintMethod");
if (PrintMethod != "" && PrintMethod != "printOperand") {
PrintMethodIdx =
llvm::find(PrintMethods, PrintMethod) - PrintMethods.begin();
@@ -887,8 +887,9 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
} else
break; // No conditions on this operand at all
}
- Cond = Target.getName().str() + ClassName + "ValidateMCOperand(" +
- Op + ", STI, " + utostr(Entry) + ")";
+ Cond = (Target.getName() + ClassName + "ValidateMCOperand(" + Op +
+ ", STI, " + utostr(Entry) + ")")
+ .str();
}
// for all subcases of ResultOperand::K_Record:
IAP.addCond(Cond);
@@ -924,7 +925,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
for (auto I = ReqFeatures.cbegin(); I != ReqFeatures.cend(); I++) {
Record *R = *I;
- std::string AsmCondString = R->getValueAsString("AssemblerCondString");
+ StringRef AsmCondString = R->getValueAsString("AssemblerCondString");
// AsmCondString has syntax [!]F(,[!]F)*
SmallVector<StringRef, 4> Ops;
@@ -934,10 +935,12 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
for (auto &Op : Ops) {
assert(!Op.empty() && "Empty operator");
if (Op[0] == '!')
- Cond = "!STI.getFeatureBits()[" + Namespace + "::" +
- Op.substr(1).str() + "]";
+ Cond = ("!STI.getFeatureBits()[" + Namespace + "::" + Op.substr(1) +
+ "]")
+ .str();
else
- Cond = "STI.getFeatureBits()[" + Namespace + "::" + Op.str() + "]";
+ Cond =
+ ("STI.getFeatureBits()[" + Namespace + "::" + Op + "]").str();
IAP.addCond(Cond);
}
}
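Editorial note on the std::string to StringRef migration that dominates the AsmMatcherEmitter.cpp and AsmWriterEmitter.cpp hunks above: getValueAsString() now returns a StringRef, and concatenating a StringRef with string literals produces a lazy llvm::Twine, which must be materialized with .str() before it can be stored. A minimal sketch of the pattern (compiles against LLVM's ADT headers; makeConvertName is a hypothetical helper, not code from this diff):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include <string>

// Mirrors e.g. ("ConvertCustom_" + AsmMatchConverter).str() in the diff: the
// '+' builds a Twine that only references its operands, so it is flattened
// into an owning std::string right away.
std::string makeConvertName(llvm::StringRef AsmMatchConverter) {
  return ("ConvertCustom_" + AsmMatchConverter).str();
  // Assigning the Twine itself to a std::string would not compile; Twine
  // deliberately has no implicit conversion to std::string.
}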
diff --git a/contrib/llvm/utils/TableGen/Attributes.cpp b/contrib/llvm/utils/TableGen/Attributes.cpp
index 927f6e0..d64d30e 100644
--- a/contrib/llvm/utils/TableGen/Attributes.cpp
+++ b/contrib/llvm/utils/TableGen/Attributes.cpp
@@ -115,7 +115,7 @@ void Attributes::emitFnAttrCompatCheck(raw_ostream &OS, bool IsStringAttr) {
Records.getAllDerivedDefinitions("CompatRule");
for (auto *Rule : CompatRules) {
- std::string FuncName = Rule->getValueAsString("CompatFunc");
+ StringRef FuncName = Rule->getValueAsString("CompatFunc");
OS << " Ret &= " << FuncName << "(Caller, Callee);\n";
}
@@ -129,7 +129,7 @@ void Attributes::emitFnAttrCompatCheck(raw_ostream &OS, bool IsStringAttr) {
<< " const Function &Callee) {\n";
for (auto *Rule : MergeRules) {
- std::string FuncName = Rule->getValueAsString("MergeFunc");
+ StringRef FuncName = Rule->getValueAsString("MergeFunc");
OS << " " << FuncName << "(Caller, Callee);\n";
}
diff --git a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
index a47662b..013e960 100644
--- a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -96,7 +96,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
} else if (Action->isSubClassOf("CCIf")) {
O << Action->getValueAsString("Predicate");
} else {
- Action->dump();
+ errs() << *Action;
PrintFatalError("Unknown CCPredicateAction!");
}
@@ -268,7 +268,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
<< "LocVT, LocInfo, ArgFlags, State))\n";
O << IndentStr << IndentStr << "return false;\n";
} else {
- Action->dump();
+ errs() << *Action;
PrintFatalError("Unknown CCAction!");
}
}
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
index f34c0de..23751a2 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -187,20 +187,18 @@ AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
std::string CodeEmitterGen::getInstructionCase(Record *R,
CodeGenTarget &Target) {
std::string Case;
-
BitsInit *BI = R->getValueAsBitsInit("Inst");
- const std::vector<RecordVal> &Vals = R->getValues();
unsigned NumberedOp = 0;
-
std::set<unsigned> NamedOpIndices;
+
// Collect the set of operand indices that might correspond to named
// operand, and skip these when assigning operands based on position.
if (Target.getInstructionSet()->
getValueAsBit("noNamedPositionallyEncodedOperands")) {
CodeGenInstruction &CGI = Target.getInstruction(R);
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ for (const RecordVal &RV : R->getValues()) {
unsigned OpIdx;
- if (!CGI.Operands.hasOperandNamed(Vals[i].getName(), OpIdx))
+ if (!CGI.Operands.hasOperandNamed(RV.getName(), OpIdx))
continue;
NamedOpIndices.insert(OpIdx);
@@ -209,19 +207,21 @@ std::string CodeEmitterGen::getInstructionCase(Record *R,
// Loop over all of the fields in the instruction, determining which are the
// operands to the instruction.
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ for (const RecordVal &RV : R->getValues()) {
// Ignore fixed fields in the record, we're looking for values like:
// bits<5> RST = { ?, ?, ?, ?, ? };
- if (Vals[i].getPrefix() || Vals[i].getValue()->isComplete())
+ if (RV.getPrefix() || RV.getValue()->isComplete())
continue;
- AddCodeToMergeInOperand(R, BI, Vals[i].getName(), NumberedOp,
+ AddCodeToMergeInOperand(R, BI, RV.getName(), NumberedOp,
NamedOpIndices, Case, Target);
}
-
- std::string PostEmitter = R->getValueAsString("PostEncoderMethod");
+
+ StringRef PostEmitter = R->getValueAsString("PostEncoderMethod");
if (!PostEmitter.empty()) {
- Case += " Value = " + PostEmitter + "(MI, Value";
+ Case += " Value = ";
+ Case += PostEmitter;
+ Case += "(MI, Value";
Case += ", STI";
Case += ");\n";
}
@@ -278,11 +278,11 @@ void CodeEmitterGen::run(raw_ostream &o) {
if (R->getValueAsString("Namespace") == "TargetOpcode" ||
R->getValueAsBit("isPseudo"))
continue;
- const std::string &InstName = R->getValueAsString("Namespace") + "::"
- + R->getName().str();
+ std::string InstName =
+ (R->getValueAsString("Namespace") + "::" + R->getName()).str();
std::string Case = getInstructionCase(R, Target);
- CaseMap[Case].push_back(InstName);
+ CaseMap[Case].push_back(std::move(InstName));
}
// Emit initial function code
@@ -336,7 +336,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
o << "#endif // NDEBUG\n";
// Emit the available features compute function.
- SubtargetFeatureInfo::emitComputeAvailableFeatures(
+ SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
Target.getName(), "MCCodeEmitter", "computeAvailableFeatures",
SubtargetFeatures, o);
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index b82a76b..e48ba38 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -580,56 +580,74 @@ bool EEVT::TypeSet::EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VTOperand,
return MadeChange;
}
-/// EnforceVectorSameNumElts - 'this' is now constrained to
-/// be a vector with same num elements as VTOperand.
-bool EEVT::TypeSet::EnforceVectorSameNumElts(EEVT::TypeSet &VTOperand,
- TreePattern &TP) {
+/// EnforceSameNumElts - If VTOperand is a scalar, then 'this' is a scalar. If
+/// VTOperand is a vector, then 'this' must have the same number of elements.
+bool EEVT::TypeSet::EnforceSameNumElts(EEVT::TypeSet &VTOperand,
+ TreePattern &TP) {
if (TP.hasError())
return false;
- // "This" must be a vector and "VTOperand" must be a vector.
bool MadeChange = false;
- MadeChange |= EnforceVector(TP);
- MadeChange |= VTOperand.EnforceVector(TP);
- // If we know one of the vector types, it forces the other type to agree.
+ if (isCompletelyUnknown())
+ MadeChange = FillWithPossibleTypes(TP);
+
+ if (VTOperand.isCompletelyUnknown())
+ MadeChange = VTOperand.FillWithPossibleTypes(TP);
+
+  // If one contains vectors but the other doesn't, pull vectors out.
+ if (!hasVectorTypes())
+ MadeChange |= VTOperand.EnforceScalar(TP);
+ else if (!hasScalarTypes())
+ MadeChange |= VTOperand.EnforceVector(TP);
+ if (!VTOperand.hasVectorTypes())
+ MadeChange |= EnforceScalar(TP);
+ else if (!VTOperand.hasScalarTypes())
+ MadeChange |= EnforceVector(TP);
+
+ // If one type is a vector, make sure the other has the same element count.
+  // If this is a scalar, then we are already done with the above.
if (isConcrete()) {
MVT IVT = getConcrete();
- unsigned NumElems = IVT.getVectorNumElements();
+ if (IVT.isVector()) {
+ unsigned NumElems = IVT.getVectorNumElements();
- // Only keep types that have same elements as 'this'.
- TypeSet InputSet(VTOperand);
+ // Only keep types that have same elements as 'this'.
+ TypeSet InputSet(VTOperand);
- auto I = remove_if(VTOperand.TypeVec, [NumElems](MVT VVT) {
- return VVT.getVectorNumElements() != NumElems;
- });
- MadeChange |= I != VTOperand.TypeVec.end();
- VTOperand.TypeVec.erase(I, VTOperand.TypeVec.end());
+ auto I = remove_if(VTOperand.TypeVec, [NumElems](MVT VVT) {
+ return VVT.getVectorNumElements() != NumElems;
+ });
+ MadeChange |= I != VTOperand.TypeVec.end();
+ VTOperand.TypeVec.erase(I, VTOperand.TypeVec.end());
- if (VTOperand.TypeVec.empty()) { // FIXME: Really want an SMLoc here!
- TP.error("Type inference contradiction found, forcing '" +
- InputSet.getName() + "' to have same number elements as '" +
- getName() + "'");
- return false;
+ if (VTOperand.TypeVec.empty()) { // FIXME: Really want an SMLoc here!
+ TP.error("Type inference contradiction found, forcing '" +
+ InputSet.getName() + "' to have same number elements as '" +
+ getName() + "'");
+ return false;
+ }
}
} else if (VTOperand.isConcrete()) {
MVT IVT = VTOperand.getConcrete();
- unsigned NumElems = IVT.getVectorNumElements();
+ if (IVT.isVector()) {
+ unsigned NumElems = IVT.getVectorNumElements();
- // Only keep types that have same elements as VTOperand.
- TypeSet InputSet(*this);
+ // Only keep types that have same elements as VTOperand.
+ TypeSet InputSet(*this);
- auto I = remove_if(TypeVec, [NumElems](MVT VVT) {
- return VVT.getVectorNumElements() != NumElems;
- });
- MadeChange |= I != TypeVec.end();
- TypeVec.erase(I, TypeVec.end());
+ auto I = remove_if(TypeVec, [NumElems](MVT VVT) {
+ return VVT.getVectorNumElements() != NumElems;
+ });
+ MadeChange |= I != TypeVec.end();
+ TypeVec.erase(I, TypeVec.end());
- if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
- TP.error("Type inference contradiction found, forcing '" +
- InputSet.getName() + "' to have same number elements than '" +
- VTOperand.getName() + "'");
- return false;
+ if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
+ TP.error("Type inference contradiction found, forcing '" +
+ InputSet.getName() + "' to have same number elements than '" +
+ VTOperand.getName() + "'");
+ return false;
+ }
}
}
@@ -644,6 +662,12 @@ bool EEVT::TypeSet::EnforceSameSize(EEVT::TypeSet &VTOperand,
bool MadeChange = false;
+ if (isCompletelyUnknown())
+ MadeChange = FillWithPossibleTypes(TP);
+
+ if (VTOperand.isCompletelyUnknown())
+ MadeChange = VTOperand.FillWithPossibleTypes(TP);
+
// If we know one of the types, it forces the other type agree.
if (isConcrete()) {
MVT IVT = getConcrete();
@@ -869,7 +893,9 @@ std::string PatternToMatch::getPredicateCheck() const {
for (Record *Pred : PredicateRecs) {
if (!PredicateCheck.empty())
PredicateCheck += " && ";
- PredicateCheck += "(" + Pred->getValueAsString("CondString") + ")";
+ PredicateCheck += "(";
+ PredicateCheck += Pred->getValueAsString("CondString");
+ PredicateCheck += ")";
}
return PredicateCheck.str();
@@ -1058,7 +1084,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
getOperandNum(x.SDTCisSameNumEltsAs_Info.OtherOperandNum,
N, NodeInfo, OResNo);
return OtherNode->getExtType(OResNo).
- EnforceVectorSameNumElts(NodeToApply->getExtType(ResNo), TP);
+ EnforceSameNumElts(NodeToApply->getExtType(ResNo), TP);
}
case SDTCisSameSizeAs: {
unsigned OResNo = 0;
@@ -1248,7 +1274,7 @@ static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
if (Operator->isSubClassOf("ComplexPattern"))
return 1;
- Operator->dump();
+ errs() << *Operator;
PrintFatalError("Unhandled node in GetNumNodeResults");
}
@@ -2114,7 +2140,7 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
DagInit *Dag = dyn_cast<DagInit>(TheInit);
if (!Dag) {
- TheInit->dump();
+ TheInit->print(errs());
error("Pattern has unexpected init kind!");
}
DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
@@ -2426,7 +2452,7 @@ void CodeGenDAGPatterns::ParseNodeTransforms() {
while (!Xforms.empty()) {
Record *XFormNode = Xforms.back();
Record *SDNode = XFormNode->getValueAsDef("Opcode");
- std::string Code = XFormNode->getValueAsString("XFormFunction");
+ StringRef Code = XFormNode->getValueAsString("XFormFunction");
SDNodeXForms.insert(std::make_pair(XFormNode, NodeXForm(SDNode, Code)));
Xforms.pop_back();
@@ -2736,8 +2762,8 @@ public:
AnalyzeNode(Pat->getTree(0));
}
- void Analyze(const PatternToMatch *Pat) {
- AnalyzeNode(Pat->getSrcPattern());
+ void Analyze(const PatternToMatch &Pat) {
+ AnalyzeNode(Pat.getSrcPattern());
}
private:
@@ -2804,7 +2830,8 @@ public:
if (IntInfo->ModRef & CodeGenIntrinsic::MR_Mod)
mayStore = true;// Intrinsics that can write to memory are 'mayStore'.
- if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem)
+ if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem ||
+ IntInfo->hasSideEffects)
// ReadWriteMem intrinsics can have other strange effects.
hasSideEffects = true;
}
@@ -3193,7 +3220,7 @@ static void FindNames(const TreePatternNode *P,
}
void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern,
- const PatternToMatch &PTM) {
+ PatternToMatch &&PTM) {
// Do some sanity checking on the pattern we're about to match.
std::string Reason;
if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) {
@@ -3232,7 +3259,7 @@ void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern,
SrcNames[Entry.first].second == 1)
Pattern->error("Pattern has dead named input: $" + Entry.first);
- PatternsToMatch.push_back(PTM);
+ PatternsToMatch.push_back(std::move(PTM));
}
@@ -3262,9 +3289,7 @@ void CodeGenDAGPatterns::InferInstructionFlags() {
// Second, look for single-instruction patterns defined outside the
// instruction.
- for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
- const PatternToMatch &PTM = *I;
-
+ for (const PatternToMatch &PTM : ptms()) {
// We can only infer from single-instruction patterns, otherwise we won't
// know which instruction should get the flags.
SmallVector<Record*, 8> PatInstrs;
@@ -3280,7 +3305,7 @@ void CodeGenDAGPatterns::InferInstructionFlags() {
continue;
InstAnalyzer PatInfo(*this);
- PatInfo.Analyze(&PTM);
+ PatInfo.Analyze(PTM);
Errors += InferFromPattern(InstInfo, PatInfo, PTM.getSrcRecord());
}
@@ -3340,7 +3365,7 @@ void CodeGenDAGPatterns::VerifyInstructionFlags() {
// Analyze the source pattern.
InstAnalyzer PatInfo(*this);
- PatInfo.Analyze(&PTM);
+ PatInfo.Analyze(PTM);
// Collect error messages.
SmallVector<std::string, 4> Msgs;
@@ -3526,14 +3551,12 @@ void CodeGenDAGPatterns::ParsePatterns() {
TreePattern Temp(Result.getRecord(), DstPattern, false, *this);
Temp.InferAllTypes();
-
- AddPatternToMatch(Pattern,
- PatternToMatch(CurPattern,
- CurPattern->getValueAsListInit("Predicates"),
- Pattern->getTree(0),
- Temp.getOnlyTree(), InstImpResults,
- CurPattern->getValueAsInt("AddedComplexity"),
- CurPattern->getID()));
+ AddPatternToMatch(
+ Pattern,
+ PatternToMatch(
+ CurPattern, CurPattern->getValueAsListInit("Predicates"),
+ Pattern->getTree(0), Temp.getOnlyTree(), std::move(InstImpResults),
+ CurPattern->getValueAsInt("AddedComplexity"), CurPattern->getID()));
}
}
@@ -3781,9 +3804,7 @@ void CodeGenDAGPatterns::GenerateVariants() {
DepVars);
assert(!Variants.empty() && "Must create at least original variant!");
- Variants.erase(Variants.begin()); // Remove the original pattern.
-
- if (Variants.empty()) // No variants for this pattern.
+ if (Variants.size() == 1) // No additional variants for this pattern.
continue;
DEBUG(errs() << "FOUND VARIANTS OF: ";
@@ -3816,11 +3837,11 @@ void CodeGenDAGPatterns::GenerateVariants() {
if (AlreadyExists) continue;
// Otherwise, add it to the list of patterns we have.
- PatternsToMatch.emplace_back(
+ PatternsToMatch.push_back(PatternToMatch(
PatternsToMatch[i].getSrcRecord(), PatternsToMatch[i].getPredicates(),
Variant, PatternsToMatch[i].getDstPattern(),
PatternsToMatch[i].getDstRegs(),
- PatternsToMatch[i].getAddedComplexity(), Record::getNewUID());
+ PatternsToMatch[i].getAddedComplexity(), Record::getNewUID()));
}
DEBUG(errs() << "\n");
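Editorial note: the EnforceSameNumElts change above relaxes the old EnforceVectorSameNumElts so that scalar operands are accepted, and element counts are only enforced once a concrete vector type is known. A simplified, self-contained sketch of that filtering rule follows; SimpleVT and enforceSameNumElts are illustrative stand-ins for MVT and EEVT::TypeSet, not LLVM code.

#include <algorithm>
#include <vector>

struct SimpleVT {
  bool IsVector;
  unsigned NumElts; // meaningful only when IsVector is true
};

// Keep only the members of Set that can satisfy SDTCisSameNumEltsAs against a
// concrete type VT; returns true if anything was removed ("MadeChange").
bool enforceSameNumElts(std::vector<SimpleVT> &Set, SimpleVT VT) {
  auto I = std::remove_if(Set.begin(), Set.end(), [&](const SimpleVT &T) {
    if (!VT.IsVector)
      return T.IsVector;                           // scalar forces scalar
    return !T.IsVector || T.NumElts != VT.NumElts; // vector forces same count
  });
  bool Changed = I != Set.end();
  Set.erase(I, Set.end());
  return Changed;
}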
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
index 97401cd..8b3e191 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
@@ -144,9 +144,10 @@ namespace EEVT {
/// be a vector type VT.
bool EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VT, TreePattern &TP);
- /// EnforceVectorSameNumElts - 'this' is now constrained to
- /// be a vector with same num elements as VT.
- bool EnforceVectorSameNumElts(EEVT::TypeSet &VT, TreePattern &TP);
+ /// EnforceSameNumElts - If VTOperand is a scalar, then 'this' is a scalar.
+ /// If VTOperand is a vector, then 'this' must have the same number of
+ /// elements.
+ bool EnforceSameNumElts(EEVT::TypeSet &VT, TreePattern &TP);
/// EnforceSameSize - 'this' is now constrained to be the same size as VT.
bool EnforceSameSize(EEVT::TypeSet &VT, TreePattern &TP);
@@ -222,8 +223,8 @@ struct SDTypeConstraint {
/// processing.
class SDNodeInfo {
Record *Def;
- std::string EnumName;
- std::string SDClassName;
+ StringRef EnumName;
+ StringRef SDClassName;
unsigned Properties;
unsigned NumResults;
int NumOperands;
@@ -237,8 +238,8 @@ public:
/// variadic.
int getNumOperands() const { return NumOperands; }
Record *getRecord() const { return Def; }
- const std::string &getEnumName() const { return EnumName; }
- const std::string &getSDClassName() const { return SDClassName; }
+ StringRef getEnumName() const { return EnumName; }
+ StringRef getSDClassName() const { return SDClassName; }
const std::vector<SDTypeConstraint> &getTypeConstraints() const {
return TypeConstraints;
@@ -683,12 +684,12 @@ public:
/// processed to produce isel.
class PatternToMatch {
public:
- PatternToMatch(Record *srcrecord, ListInit *preds,
- TreePatternNode *src, TreePatternNode *dst,
- const std::vector<Record*> &dstregs,
+ PatternToMatch(Record *srcrecord, ListInit *preds, TreePatternNode *src,
+ TreePatternNode *dst, std::vector<Record *> dstregs,
int complexity, unsigned uid)
- : SrcRecord(srcrecord), Predicates(preds), SrcPattern(src), DstPattern(dst),
- Dstregs(dstregs), AddedComplexity(complexity), ID(uid) {}
+ : SrcRecord(srcrecord), Predicates(preds), SrcPattern(src),
+ DstPattern(dst), Dstregs(std::move(dstregs)),
+ AddedComplexity(complexity), ID(uid) {}
Record *SrcRecord; // Originating Record for the pattern.
ListInit *Predicates; // Top level predicate conditions to match.
@@ -852,7 +853,7 @@ private:
void GenerateVariants();
void VerifyInstructionFlags();
- void AddPatternToMatch(TreePattern *Pattern, const PatternToMatch &PTM);
+ void AddPatternToMatch(TreePattern *Pattern, PatternToMatch &&PTM);
void FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
std::map<std::string,
TreePatternNode*> &InstInputs,
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
index bb2ec2a..f4a7609 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -77,6 +77,7 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
PrintMethod = Rec->getValueAsString("PrintMethod");
OperandType = Rec->getValueAsString("OperandType");
OperandNamespace = Rec->getValueAsString("OperandNamespace");
+ EncoderMethod = Rec->getValueAsString("EncoderMethod");
} else if (Rec->isSubClassOf("Operand")) {
PrintMethod = Rec->getValueAsString("PrintMethod");
OperandType = Rec->getValueAsString("OperandType");
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.h b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
index 75db17b..e173e15 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
@@ -206,7 +206,7 @@ template <typename T> class ArrayRef;
class CodeGenInstruction {
public:
Record *TheDef; // The actual record defining this instruction.
- std::string Namespace; // The namespace the instruction is in.
+ StringRef Namespace; // The namespace the instruction is in.
/// AsmString - The format string used to emit a .s file for the
/// instruction.
diff --git a/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h b/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h
index 6df0e6a..2437412 100644
--- a/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h
+++ b/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h
@@ -123,6 +123,13 @@ struct CodeGenIntrinsic {
/// True if the intrinsic is marked as convergent.
bool isConvergent;
+ /// True if the intrinsic has side effects that aren't captured by any
+ /// of the other flags.
+ bool hasSideEffects;
+
+ // True if the intrinsic is marked as speculatable.
+ bool isSpeculatable;
+
enum ArgAttribute { NoCapture, Returned, ReadOnly, WriteOnly, ReadNone };
std::vector<std::pair<unsigned, ArgAttribute>> ArgumentAttributes;
diff --git a/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp b/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp
index 8032d7b..43348b6 100644
--- a/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenMapTable.cpp
@@ -367,7 +367,7 @@ unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) {
ArrayRef<const CodeGenInstruction*> NumberedInstructions =
Target.getInstructionsByEnumValue();
- std::string TargetName = Target.getName();
+ StringRef Namespace = Target.getInstNamespace();
const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
unsigned NumCol = ValueCols.size();
unsigned TotalNumInstr = NumberedInstructions.size();
@@ -387,22 +387,22 @@ unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) {
if (ColInstrs[j] != nullptr) {
RelExists = 1;
OutStr += ", ";
- OutStr += TargetName;
+ OutStr += Namespace;
OutStr += "::";
OutStr += ColInstrs[j]->getName();
} else { OutStr += ", (uint16_t)-1U";}
}
if (RelExists) {
- OS << " { " << TargetName << "::" << CurInstr->getName();
+ OS << " { " << Namespace << "::" << CurInstr->getName();
OS << OutStr <<" },\n";
TableSize++;
}
}
}
if (!TableSize) {
- OS << " { " << TargetName << "::" << "INSTRUCTION_LIST_END, ";
- OS << TargetName << "::" << "INSTRUCTION_LIST_END }";
+ OS << " { " << Namespace << "::" << "INSTRUCTION_LIST_END, ";
+ OS << Namespace << "::" << "INSTRUCTION_LIST_END }";
}
OS << "}; // End of " << InstrMapDesc.getName() << "Table\n\n";
return TableSize;
@@ -567,7 +567,7 @@ namespace llvm {
//===----------------------------------------------------------------------===//
void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
CodeGenTarget Target(Records);
- std::string TargetName = Target.getName();
+ StringRef NameSpace = Target.getInstNamespace();
std::vector<Record*> InstrMapVec;
InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
@@ -577,7 +577,7 @@ void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
OS << "#ifdef GET_INSTRMAP_INFO\n";
OS << "#undef GET_INSTRMAP_INFO\n";
OS << "namespace llvm {\n\n";
- OS << "namespace " << TargetName << " {\n\n";
+ OS << "namespace " << NameSpace << " {\n\n";
// Emit coulumn field names and their values as enums.
emitEnums(OS, Records);
@@ -600,7 +600,7 @@ void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
// Emit map tables and the functions to query them.
IMap.emitTablesWithFunc(OS);
}
- OS << "} // End " << TargetName << " namespace\n";
+ OS << "} // End " << NameSpace << " namespace\n";
OS << "} // End llvm namespace\n";
OS << "#endif // GET_INSTRMAP_INFO\n\n";
}
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
index c03e0d1..6399fb5 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -679,11 +679,6 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
Name(R->getName()),
TopoSigs(RegBank.getNumTopoSigs()),
EnumValue(-1) {
- // Rename anonymous register classes.
- if (R->getName().size() > 9 && R->getName()[9] == '.') {
- static unsigned AnonCounter = 0;
- R->setName("AnonRegClass_" + utostr(AnonCounter++));
- }
std::vector<Record*> TypeList = R->getValueAsListOfDefs("RegTypes");
for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
@@ -867,7 +862,7 @@ std::string CodeGenRegisterClass::getQualifiedName() const {
if (Namespace.empty())
return getName();
else
- return Namespace + "::" + getName();
+ return (Namespace + "::" + getName()).str();
}
// Compute sub-classes of all register classes.
@@ -920,6 +915,84 @@ void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
RC.inheritProperties(RegBank);
}
+Optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
+CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
+ CodeGenRegBank &RegBank, const CodeGenSubRegIndex *SubIdx) const {
+ auto SizeOrder = [](const CodeGenRegisterClass *A,
+ const CodeGenRegisterClass *B) {
+ return A->getMembers().size() > B->getMembers().size();
+ };
+
+ auto &RegClasses = RegBank.getRegClasses();
+
+ // Find all the subclasses of this one that fully support the sub-register
+ // index and order them by size. BiggestSuperRC should always be first.
+ CodeGenRegisterClass *BiggestSuperRegRC = getSubClassWithSubReg(SubIdx);
+ if (!BiggestSuperRegRC)
+ return None;
+ BitVector SuperRegRCsBV = BiggestSuperRegRC->getSubClasses();
+ std::vector<CodeGenRegisterClass *> SuperRegRCs;
+ for (auto &RC : RegClasses)
+ if (SuperRegRCsBV[RC.EnumValue])
+ SuperRegRCs.emplace_back(&RC);
+ std::sort(SuperRegRCs.begin(), SuperRegRCs.end(), SizeOrder);
+ assert(SuperRegRCs.front() == BiggestSuperRegRC && "Biggest class wasn't first");
+
+ // Find all the subreg classes and order them by size too.
+ std::vector<std::pair<CodeGenRegisterClass *, BitVector>> SuperRegClasses;
+ for (auto &RC: RegClasses) {
+ BitVector SuperRegClassesBV(RegClasses.size());
+ RC.getSuperRegClasses(SubIdx, SuperRegClassesBV);
+ if (SuperRegClassesBV.any())
+ SuperRegClasses.push_back(std::make_pair(&RC, SuperRegClassesBV));
+ }
+ std::sort(SuperRegClasses.begin(), SuperRegClasses.end(),
+ [&](const std::pair<CodeGenRegisterClass *, BitVector> &A,
+ const std::pair<CodeGenRegisterClass *, BitVector> &B) {
+ return SizeOrder(A.first, B.first);
+ });
+
+ // Find the biggest subclass and subreg class such that R:subidx is in the
+ // subreg class for all R in subclass.
+ //
+ // For example:
+ // All registers in X86's GR64 have a sub_32bit subregister but no class
+ // exists that contains all the 32-bit subregisters because GR64 contains RIP
+ // but GR32 does not contain EIP. Instead, we constrain SuperRegRC to
+ // GR32_with_sub_8bit (which is identical to GR32_with_sub_32bit) and then,
+ // having excluded RIP, we are able to find a SubRegRC (GR32).
+ CodeGenRegisterClass *ChosenSuperRegClass = nullptr;
+ CodeGenRegisterClass *SubRegRC = nullptr;
+ for (auto *SuperRegRC : SuperRegRCs) {
+ for (const auto &SuperRegClassPair : SuperRegClasses) {
+ const BitVector &SuperRegClassBV = SuperRegClassPair.second;
+ if (SuperRegClassBV[SuperRegRC->EnumValue]) {
+ SubRegRC = SuperRegClassPair.first;
+ ChosenSuperRegClass = SuperRegRC;
+
+ // If SubRegRC is bigger than SuperRegRC then there are members of
+ // SubRegRC that don't have super registers via SubIdx. Keep looking to
+ // find a better fit and fall back on this one if there isn't one.
+ //
+ // This is intended to prevent X86 from making odd choices such as
+ // picking LOW32_ADDR_ACCESS_RBP instead of GR32 in the example above.
+ // LOW32_ADDR_ACCESS_RBP is a valid choice but contains registers that
+ // aren't subregisters of SuperRegRC whereas GR32 has a direct 1:1
+ // mapping.
+ if (SuperRegRC->getMembers().size() >= SubRegRC->getMembers().size())
+ return std::make_pair(ChosenSuperRegClass, SubRegRC);
+ }
+ }
+
+ // If we found a fit but it wasn't quite ideal because SubRegRC had excess
+ // registers, then we're done.
+ if (ChosenSuperRegClass)
+ return std::make_pair(ChosenSuperRegClass, SubRegRC);
+ }
+
+ return None;
+}
+
void CodeGenRegisterClass::getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
BitVector &Out) const {
auto FindI = SuperRegClasses.find(SubIdx);
@@ -1195,12 +1268,12 @@ void CodeGenRegBank::computeSubRegLaneMasks() {
CoveringLanes = LaneBitmask::getAll();
for (auto &Idx : SubRegIndices) {
if (Idx.getComposites().empty()) {
- if (Bit > 32) {
+ if (Bit > LaneBitmask::BitWidth) {
PrintFatalError(
Twine("Ran out of lanemask bits to represent subregister ")
+ Idx.getName());
}
- Idx.LaneMask = LaneBitmask(1 << Bit);
+ Idx.LaneMask = LaneBitmask::getLane(Bit);
++Bit;
} else {
Idx.LaneMask = LaneBitmask::getNone();
@@ -1225,9 +1298,9 @@ void CodeGenRegBank::computeSubRegLaneMasks() {
static_assert(sizeof(Idx.LaneMask.getAsInteger()) == 4,
"Change Log2_32 to a proper one");
unsigned DstBit = Log2_32(Idx.LaneMask.getAsInteger());
- assert(Idx.LaneMask == LaneBitmask(1 << DstBit) &&
+ assert(Idx.LaneMask == LaneBitmask::getLane(DstBit) &&
"Must be a leaf subregister");
- MaskRolPair MaskRol = { LaneBitmask(1), (uint8_t)DstBit };
+ MaskRolPair MaskRol = { LaneBitmask::getLane(0), (uint8_t)DstBit };
LaneTransforms.push_back(MaskRol);
} else {
// Go through all leaf subregisters and find the ones that compose with
@@ -1241,7 +1314,7 @@ void CodeGenRegBank::computeSubRegLaneMasks() {
continue;
// Replicate the behaviour from the lane mask generation loop above.
unsigned SrcBit = NextBit;
- LaneBitmask SrcMask = LaneBitmask(1 << SrcBit);
+ LaneBitmask SrcMask = LaneBitmask::getLane(SrcBit);
if (NextBit < LaneBitmask::BitWidth-1)
++NextBit;
assert(Idx2.LaneMask == SrcMask);
@@ -1313,7 +1386,7 @@ void CodeGenRegBank::computeSubRegLaneMasks() {
// For classes without any subregisters set LaneMask to 1 instead of 0.
// This makes it easier for client code to handle classes uniformly.
if (LaneMask.none())
- LaneMask = LaneBitmask(1);
+ LaneMask = LaneBitmask::getLane(0);
RegClass.LaneMask = LaneMask;
}
@@ -1668,7 +1741,7 @@ void CodeGenRegBank::computeRegUnitSets() {
dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
<< ":";
for (auto &U : RegUnitSets[USIdx].Units)
- dbgs() << " " << RegUnits[U].Roots[0]->getName();
+ printRegUnitName(U);
dbgs() << "\n";
});
@@ -1681,7 +1754,7 @@ void CodeGenRegBank::computeRegUnitSets() {
dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
<< ":";
for (auto &U : RegUnitSets[USIdx].Units)
- dbgs() << " " << RegUnits[U].Roots[0]->getName();
+ printRegUnitName(U);
dbgs() << "\n";
}
dbgs() << "\nUnion sets:\n");
@@ -1727,7 +1800,7 @@ void CodeGenRegBank::computeRegUnitSets() {
DEBUG(dbgs() << "UnitSet " << RegUnitSets.size()-1
<< " " << RegUnitSets.back().Name << ":";
for (auto &U : RegUnitSets.back().Units)
- dbgs() << " " << RegUnits[U].Roots[0]->getName();
+ printRegUnitName(U);
dbgs() << "\n";);
}
}
@@ -1742,7 +1815,7 @@ void CodeGenRegBank::computeRegUnitSets() {
dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
<< ":";
for (auto &U : RegUnitSets[USIdx].Units)
- dbgs() << " " << RegUnits[U].Roots[0]->getName();
+ printRegUnitName(U);
dbgs() << "\n";
});
@@ -1763,8 +1836,8 @@ void CodeGenRegBank::computeRegUnitSets() {
continue;
DEBUG(dbgs() << "RC " << RC.getName() << " Units: \n";
- for (auto &U : RCRegUnits)
- dbgs() << RegUnits[U].getRoots()[0]->getName() << " ";
+ for (auto U : RCRegUnits)
+ printRegUnitName(U);
dbgs() << "\n UnitSetIDs:");
// Find all supersets.
@@ -2170,3 +2243,10 @@ BitVector CodeGenRegBank::computeCoveredRegisters(ArrayRef<Record*> Regs) {
BV.set(Set[i]->EnumValue);
return BV;
}
+
+void CodeGenRegBank::printRegUnitName(unsigned Unit) const {
+ if (Unit < NumNativeRegUnits)
+ dbgs() << ' ' << RegUnits[Unit].Roots[0]->getName();
+ else
+ dbgs() << " #" << Unit;
+}
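Editorial note: the LaneBitmask changes above drop the hard-coded 32-bit assumptions (1 << Bit, Bit > 32) in favor of LaneBitmask::getLane(Bit) and LaneBitmask::BitWidth. The tiny stand-in type below shows the idea; MiniLaneBitmask is an illustrative assumption, while the real class lives in llvm/MC/LaneBitmask.h.

#include <cstdint>

// MiniLaneBitmask is a simplified stand-in for llvm::LaneBitmask.
struct MiniLaneBitmask {
  typedef uint64_t Type;
  static constexpr unsigned BitWidth = 8 * sizeof(Type);
  Type Mask;

  // Equivalent of LaneBitmask::getLane(Bit): valid for any Bit < BitWidth,
  // unlike the old LaneBitmask(1 << Bit), which shifted a 32-bit int literal
  // and therefore capped the representation at 32 lanes.
  static MiniLaneBitmask getLane(unsigned Lane) {
    return MiniLaneBitmask{Type(1) << Lane};
  }
};

// The overflow check mirrors the hunk above: compare against BitWidth rather
// than the literal 32, so widening the mask type needs no further edits.
inline bool laneFits(unsigned Bit) { return Bit < MiniLaneBitmask::BitWidth; }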
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.h b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
index 3ed26fa..d0f96a0 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
@@ -308,13 +308,13 @@ namespace llvm {
public:
unsigned EnumValue;
- std::string Namespace;
+ StringRef Namespace;
SmallVector<MVT::SimpleValueType, 4> VTs;
unsigned SpillSize;
unsigned SpillAlignment;
int CopyCost;
bool Allocatable;
- std::string AltOrderSelect;
+ StringRef AltOrderSelect;
uint8_t AllocationPriority;
/// Contains the combination of the lane masks of all subregisters.
LaneBitmask LaneMask;
@@ -329,6 +329,9 @@ namespace llvm {
const std::string &getName() const { return Name; }
std::string getQualifiedName() const;
ArrayRef<MVT::SimpleValueType> getValueTypes() const {return VTs;}
+ bool hasValueType(MVT::SimpleValueType VT) const {
+ return std::find(VTs.begin(), VTs.end(), VT) != VTs.end();
+ }
unsigned getNumValueTypes() const { return VTs.size(); }
MVT::SimpleValueType getValueTypeNum(unsigned VTNum) const {
@@ -360,6 +363,18 @@ namespace llvm {
return SubClassWithSubReg.lookup(SubIdx);
}
+ /// Find largest subclass where all registers have SubIdx subregisters in
+ /// SubRegClass and the largest subregister class that contains those
+ /// subregisters without (as far as possible) also containing additional registers.
+ ///
+ /// This can be used to find a suitable pair of classes for subregister copies.
+    /// \return std::pair<SubClass, SubRegClass> where SubClass is a class where
+    /// every register has SubIdx and SubRegClass is a class where every register
+    /// is covered by the SubIdx subregister of SubClass.
+ Optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
+ getMatchingSubClassWithSubRegs(CodeGenRegBank &RegBank,
+ const CodeGenSubRegIndex *SubIdx) const;
+
void setSubClassWithSubReg(const CodeGenSubRegIndex *SubIdx,
CodeGenRegisterClass *SubRC) {
SubClassWithSubReg[SubIdx] = SubRC;
@@ -370,7 +385,7 @@ namespace llvm {
void getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
BitVector &Out) const;
- // addSuperRegClass - Add a class containing only SudIdx super-registers.
+ // addSuperRegClass - Add a class containing only SubIdx super-registers.
void addSuperRegClass(CodeGenSubRegIndex *SubIdx,
CodeGenRegisterClass *SuperRC) {
SuperRegClasses[SubIdx].insert(SuperRC);
@@ -735,6 +750,10 @@ namespace llvm {
// LaneMask is contained in CoveringLanes will be completely covered by
// another sub-register with the same or larger lane mask.
LaneBitmask CoveringLanes;
+
+ // Helper function for printing debug information. Handles artificial
+ // (non-native) reg units.
+ void printRegUnitName(unsigned Unit) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp b/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
index cae1cf4..50569b2 100644
--- a/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
@@ -140,6 +140,7 @@ CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
// Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
// ProcResourceDefs.
+ DEBUG(dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
collectProcResources();
checkCompleteness();
@@ -160,6 +161,7 @@ void CodeGenSchedModels::collectProcModels() {
ProcModelMap[NoModelDef] = 0;
// For each processor, find a unique machine model.
+ DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
addProcModel(ProcRecords[i]);
}
@@ -315,6 +317,7 @@ void CodeGenSchedModels::collectSchedRW() {
RW.Aliases.push_back(*AI);
}
DEBUG(
+ dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
dbgs() << WIdx << ": ";
SchedWrites[WIdx].dump();
@@ -531,6 +534,7 @@ void CodeGenSchedModels::collectSchedClasses() {
// Create classes for InstRW defs.
RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
+ DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI)
createInstRWClass(*OI);
@@ -541,8 +545,9 @@ void CodeGenSchedModels::collectSchedClasses() {
if (!EnableDump)
return;
+ dbgs() << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n";
for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
- std::string InstName = Inst->TheDef->getName();
+ StringRef InstName = Inst->TheDef->getName();
unsigned SCIdx = InstrClassMap.lookup(Inst->TheDef);
if (!SCIdx) {
if (!Inst->hasNoSchedulingInfo)
@@ -790,6 +795,7 @@ bool CodeGenSchedModels::hasItineraries() const {
// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
+ DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
for (CodeGenProcModel &ProcModel : ProcModels) {
if (!ProcModel.hasItineraries())
continue;
@@ -860,6 +866,7 @@ void CodeGenSchedModels::collectProcUnsupportedFeatures() {
/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
+ DEBUG(dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
// Visit all existing classes and newly created classes.
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index 6503d5a..58df3ce 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -25,13 +25,18 @@
#include <algorithm>
using namespace llvm;
+cl::OptionCategory AsmParserCat("Options for -gen-asm-parser");
+cl::OptionCategory AsmWriterCat("Options for -gen-asm-writer");
+
static cl::opt<unsigned>
-AsmParserNum("asmparsernum", cl::init(0),
- cl::desc("Make -gen-asm-parser emit assembly parser #N"));
+ AsmParserNum("asmparsernum", cl::init(0),
+ cl::desc("Make -gen-asm-parser emit assembly parser #N"),
+ cl::cat(AsmParserCat));
static cl::opt<unsigned>
-AsmWriterNum("asmwriternum", cl::init(0),
- cl::desc("Make -gen-asm-writer emit assembly writer #N"));
+ AsmWriterNum("asmwriternum", cl::init(0),
+ cl::desc("Make -gen-asm-writer emit assembly writer #N"),
+ cl::cat(AsmWriterCat));
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
/// record corresponds to.
@@ -70,6 +75,7 @@ StringRef llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::x86mmx: return "MVT::x86mmx";
case MVT::Glue: return "MVT::Glue";
case MVT::isVoid: return "MVT::isVoid";
+ case MVT::v1i1: return "MVT::v1i1";
case MVT::v2i1: return "MVT::v2i1";
case MVT::v4i1: return "MVT::v4i1";
case MVT::v8i1: return "MVT::v8i1";
@@ -121,6 +127,46 @@ StringRef llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::v2f64: return "MVT::v2f64";
case MVT::v4f64: return "MVT::v4f64";
case MVT::v8f64: return "MVT::v8f64";
+ case MVT::nxv1i1: return "MVT::nxv1i1";
+ case MVT::nxv2i1: return "MVT::nxv2i1";
+ case MVT::nxv4i1: return "MVT::nxv4i1";
+ case MVT::nxv8i1: return "MVT::nxv8i1";
+ case MVT::nxv16i1: return "MVT::nxv16i1";
+ case MVT::nxv32i1: return "MVT::nxv32i1";
+ case MVT::nxv1i8: return "MVT::nxv1i8";
+ case MVT::nxv2i8: return "MVT::nxv2i8";
+ case MVT::nxv4i8: return "MVT::nxv4i8";
+ case MVT::nxv8i8: return "MVT::nxv8i8";
+ case MVT::nxv16i8: return "MVT::nxv16i8";
+ case MVT::nxv32i8: return "MVT::nxv32i8";
+ case MVT::nxv1i16: return "MVT::nxv1i16";
+ case MVT::nxv2i16: return "MVT::nxv2i16";
+ case MVT::nxv4i16: return "MVT::nxv4i16";
+ case MVT::nxv8i16: return "MVT::nxv8i16";
+ case MVT::nxv16i16: return "MVT::nxv16i16";
+ case MVT::nxv32i16: return "MVT::nxv32i16";
+ case MVT::nxv1i32: return "MVT::nxv1i32";
+ case MVT::nxv2i32: return "MVT::nxv2i32";
+ case MVT::nxv4i32: return "MVT::nxv4i32";
+ case MVT::nxv8i32: return "MVT::nxv8i32";
+ case MVT::nxv16i32: return "MVT::nxv16i32";
+ case MVT::nxv1i64: return "MVT::nxv1i64";
+ case MVT::nxv2i64: return "MVT::nxv2i64";
+ case MVT::nxv4i64: return "MVT::nxv4i64";
+ case MVT::nxv8i64: return "MVT::nxv8i64";
+ case MVT::nxv16i64: return "MVT::nxv16i64";
+ case MVT::nxv2f16: return "MVT::nxv2f16";
+ case MVT::nxv4f16: return "MVT::nxv4f16";
+ case MVT::nxv8f16: return "MVT::nxv8f16";
+ case MVT::nxv1f32: return "MVT::nxv1f32";
+ case MVT::nxv2f32: return "MVT::nxv2f32";
+ case MVT::nxv4f32: return "MVT::nxv4f32";
+ case MVT::nxv8f32: return "MVT::nxv8f32";
+ case MVT::nxv16f32: return "MVT::nxv16f32";
+ case MVT::nxv1f64: return "MVT::nxv1f64";
+ case MVT::nxv2f64: return "MVT::nxv2f64";
+ case MVT::nxv4f64: return "MVT::nxv4f64";
+ case MVT::nxv8f64: return "MVT::nxv8f64";
case MVT::token: return "MVT::token";
case MVT::Metadata: return "MVT::Metadata";
case MVT::iPTR: return "MVT::iPTR";
@@ -161,7 +207,7 @@ const StringRef CodeGenTarget::getName() const {
return TargetRec->getName();
}
-std::string CodeGenTarget::getInstNamespace() const {
+StringRef CodeGenTarget::getInstNamespace() const {
for (const CodeGenInstruction *Inst : getInstructionsByEnumValue()) {
// Make sure not to pick up "TargetOpcode" by accidentally getting
// the namespace off the PHI instruction or something.
@@ -471,6 +517,8 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
isNoReturn = false;
isNoDuplicate = false;
isConvergent = false;
+ isSpeculatable = false;
+ hasSideEffects = false;
if (DefName.size() <= 4 ||
std::string(DefName.begin(), DefName.begin() + 4) != "int_")
@@ -609,6 +657,10 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
isConvergent = true;
else if (Property->getName() == "IntrNoReturn")
isNoReturn = true;
+ else if (Property->getName() == "IntrSpeculatable")
+ isSpeculatable = true;
+ else if (Property->getName() == "IntrHasSideEffects")
+ hasSideEffects = true;
else if (Property->isSubClassOf("NoCapture")) {
unsigned ArgNo = Property->getValueAsInt("ArgNo");
ArgumentAttributes.push_back(std::make_pair(ArgNo, NoCapture));
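
Two separate changes are folded into this file: the nxv* cases name scalable vector types (nxv4i32 corresponds to <vscale x 4 x i32>, a vector whose element count is a runtime multiple of the stated minimum), and CodeGenIntrinsic learns the IntrSpeculatable / IntrHasSideEffects properties. The new enumerator strings follow a fixed nxv<MinElts><EltTy> pattern; a hypothetical helper (not part of TableGen) that composes them would be:

    #include <string>

    // Hypothetical helper, shown only to document the naming scheme:
    // scalableVectorEnumName(4, "i32") == "MVT::nxv4i32".
    std::string scalableVectorEnumName(unsigned MinElts, const std::string &EltTy) {
      return "MVT::nxv" + std::to_string(MinElts) + EltTy;
    }
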
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.h b/contrib/llvm/utils/TableGen/CodeGenTarget.h
index c822e94..ff624ea 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.h
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.h
@@ -86,7 +86,7 @@ public:
/// getInstNamespace - Return the target-specific instruction namespace.
///
- std::string getInstNamespace() const;
+ StringRef getInstNamespace() const;
/// getInstructionSet - Return the InstructionSet object.
///
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.h b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
index 6bda9ca..c672b0a 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
@@ -208,7 +208,7 @@ public:
Children.resize(NC);
}
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == Scope;
}
@@ -233,7 +233,7 @@ public:
const std::string &getWhatFor() const { return WhatFor; }
unsigned getResultNo() const { return ResultNo; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == RecordNode;
}
@@ -265,7 +265,7 @@ public:
const std::string &getWhatFor() const { return WhatFor; }
unsigned getResultNo() const { return ResultNo; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == RecordChild;
}
@@ -281,7 +281,7 @@ class RecordMemRefMatcher : public Matcher {
public:
RecordMemRefMatcher() : Matcher(RecordMemRef) {}
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == RecordMemRef;
}
@@ -297,7 +297,7 @@ class CaptureGlueInputMatcher : public Matcher {
public:
CaptureGlueInputMatcher() : Matcher(CaptureGlueInput) {}
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CaptureGlueInput;
}
@@ -315,7 +315,7 @@ public:
unsigned getChildNo() const { return ChildNo; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == MoveChild;
}
@@ -332,7 +332,7 @@ class MoveParentMatcher : public Matcher {
public:
MoveParentMatcher() : Matcher(MoveParent) {}
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == MoveParent;
}
@@ -352,7 +352,7 @@ public:
unsigned getMatchNumber() const { return MatchNumber; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckSame;
}
@@ -376,7 +376,7 @@ public:
unsigned getChildNo() const { return ChildNo; }
unsigned getMatchNumber() const { return MatchNumber; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckChildSame;
}
@@ -399,7 +399,7 @@ public:
StringRef getPredicate() const { return Predicate; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckPatternPredicate;
}
@@ -419,7 +419,7 @@ public:
TreePredicateFn getPredicate() const;
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckPredicate;
}
@@ -441,7 +441,7 @@ public:
const SDNodeInfo &getOpcode() const { return Opcode; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckOpcode;
}
@@ -462,7 +462,7 @@ public:
: Matcher(SwitchOpcode), Cases(cases.begin(), cases.end()) {}
~SwitchOpcodeMatcher() override;
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == SwitchOpcode;
}
@@ -489,7 +489,7 @@ public:
MVT::SimpleValueType getType() const { return Type; }
unsigned getResNo() const { return ResNo; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckType;
}
@@ -512,7 +512,7 @@ public:
: Matcher(SwitchType), Cases(cases.begin(), cases.end()) {}
~SwitchTypeMatcher() override;
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == SwitchType;
}
@@ -540,7 +540,7 @@ public:
unsigned getChildNo() const { return ChildNo; }
MVT::SimpleValueType getType() const { return Type; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckChildType;
}
@@ -564,7 +564,7 @@ public:
int64_t getValue() const { return Value; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckInteger;
}
@@ -588,7 +588,7 @@ public:
unsigned getChildNo() const { return ChildNo; }
int64_t getValue() const { return Value; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckChildInteger;
}
@@ -611,7 +611,7 @@ public:
StringRef getCondCodeName() const { return CondCodeName; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckCondCode;
}
@@ -632,7 +632,7 @@ public:
StringRef getTypeName() const { return TypeName; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckValueType;
}
@@ -673,7 +673,7 @@ public:
const std::string getName() const { return Name; }
unsigned getFirstResult() const { return FirstResult; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckComplexPat;
}
@@ -695,7 +695,7 @@ public:
int64_t getValue() const { return Value; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckAndImm;
}
@@ -716,7 +716,7 @@ public:
int64_t getValue() const { return Value; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckOrImm;
}
@@ -734,7 +734,7 @@ public:
CheckFoldableChainNodeMatcher()
: Matcher(CheckFoldableChainNode) {}
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CheckFoldableChainNode;
}
@@ -754,7 +754,7 @@ public:
int64_t getValue() const { return Val; }
MVT::SimpleValueType getVT() const { return VT; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitInteger;
}
@@ -778,7 +778,7 @@ public:
const std::string &getValue() const { return Val; }
MVT::SimpleValueType getVT() const { return VT; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitStringInteger;
}
@@ -803,7 +803,7 @@ public:
const CodeGenRegister *getReg() const { return Reg; }
MVT::SimpleValueType getVT() const { return VT; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitRegister;
}
@@ -826,7 +826,7 @@ public:
unsigned getSlot() const { return Slot; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitConvertToTarget;
}
@@ -854,7 +854,7 @@ public:
return ChainNodes[i];
}
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitMergeInputChains;
}
@@ -878,7 +878,7 @@ public:
unsigned getSrcSlot() const { return SrcSlot; }
Record *getDestPhysReg() const { return DestPhysReg; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitCopyToReg;
}
@@ -904,7 +904,7 @@ public:
unsigned getSlot() const { return Slot; }
Record *getNodeXForm() const { return NodeXForm; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitNodeXForm;
}
@@ -964,7 +964,7 @@ public:
bool hasMemRefs() const { return HasMemRefs; }
int getNumFixedArityOperands() const { return NumFixedArityOperands; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitNode || N->getKind() == MorphNodeTo;
}
@@ -991,7 +991,7 @@ public:
unsigned getFirstResultSlot() const { return FirstResultSlot; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == EmitNode;
}
@@ -1015,7 +1015,7 @@ public:
const PatternToMatch &getPattern() const { return Pattern; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == MorphNodeTo;
}
};
@@ -1036,7 +1036,7 @@ public:
unsigned getResult(unsigned R) const { return Results[R]; }
const PatternToMatch &getPattern() const { return Pattern; }
- static inline bool classof(const Matcher *N) {
+ static bool classof(const Matcher *N) {
return N->getKind() == CompleteMatch;
}
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index d30fc51..67e8f15 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -11,14 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#include "DAGISelMatcher.h"
#include "CodeGenDAGPatterns.h"
+#include "DAGISelMatcher.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
@@ -26,10 +30,17 @@ enum {
CommentIndent = 30
};
+cl::OptionCategory DAGISelCat("Options for -gen-dag-isel");
+
// To reduce generated source code size.
-static cl::opt<bool>
-OmitComments("omit-comments", cl::desc("Do not generate comments"),
- cl::init(false));
+static cl::opt<bool> OmitComments("omit-comments",
+ cl::desc("Do not generate comments"),
+ cl::init(false), cl::cat(DAGISelCat));
+
+static cl::opt<bool> InstrumentCoverage(
+ "instrument-coverage",
+ cl::desc("Generates tables to help identify patterns matched"),
+ cl::init(false), cl::cat(DAGISelCat));
namespace {
class MatcherTableEmitter {
@@ -52,6 +63,19 @@ class MatcherTableEmitter {
DenseMap<Record*, unsigned> NodeXFormMap;
std::vector<Record*> NodeXForms;
+ std::vector<std::string> VecIncludeStrings;
+ MapVector<std::string, unsigned, StringMap<unsigned> > VecPatterns;
+
+ unsigned getPatternIdxFromTable(std::string &&P, std::string &&include_loc) {
+ const auto It = VecPatterns.find(P);
+ if (It == VecPatterns.end()) {
+ VecPatterns.insert(make_pair(std::move(P), VecPatterns.size()));
+ VecIncludeStrings.push_back(std::move(include_loc));
+ return VecIncludeStrings.size() - 1;
+ }
+ return It->second;
+ }
+
public:
MatcherTableEmitter(const CodeGenDAGPatterns &cgp)
: CGP(cgp) {}
@@ -62,6 +86,9 @@ public:
void EmitPredicateFunctions(formatted_raw_ostream &OS);
void EmitHistogram(const Matcher *N, formatted_raw_ostream &OS);
+
+ void EmitPatternMatchTable(raw_ostream &OS);
+
private:
unsigned EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
formatted_raw_ostream &OS);
@@ -117,6 +144,14 @@ private:
};
} // end anonymous namespace.
+static std::string GetPatFromTreePatternNode(const TreePatternNode *N) {
+ std::string str;
+ raw_string_ostream Stream(str);
+ Stream << *N;
+ Stream.str();
+ return str;
+}
+
static unsigned GetVBRSize(unsigned Val) {
if (Val <= 127) return 1;
@@ -150,6 +185,56 @@ static uint64_t EmitVBRValue(uint64_t Val, raw_ostream &OS) {
return NumBytes+1;
}
+// This is expensive and slow.
+static std::string getIncludePath(const Record *R) {
+ std::string str;
+ raw_string_ostream Stream(str);
+ auto Locs = R->getLoc();
+ SMLoc L;
+ if (Locs.size() > 1) {
+ // Get where the pattern prototype was instantiated
+ L = Locs[1];
+ } else if (Locs.size() == 1) {
+ L = Locs[0];
+ }
+ unsigned CurBuf = SrcMgr.FindBufferContainingLoc(L);
+ assert(CurBuf && "Invalid or unspecified location!");
+
+ Stream << SrcMgr.getBufferInfo(CurBuf).Buffer->getBufferIdentifier() << ":"
+ << SrcMgr.FindLineNumber(L, CurBuf);
+ Stream.str();
+ return str;
+}
+
+void MatcherTableEmitter::EmitPatternMatchTable(raw_ostream &OS) {
+
+ assert(isUInt<16>(VecPatterns.size()) &&
+ "Using only 16 bits to encode offset into Pattern Table");
+ assert(VecPatterns.size() == VecIncludeStrings.size() &&
+ "The sizes of Pattern and include vectors should be the same");
+ OS << "StringRef getPatternForIndex(unsigned Index) override {\n";
+ OS << "static const char * PATTERN_MATCH_TABLE[] = {\n";
+
+ for (const auto &It : VecPatterns) {
+ OS << "\"" << It.first << "\",\n";
+ }
+
+ OS << "\n};";
+ OS << "\nreturn StringRef(PATTERN_MATCH_TABLE[Index]);";
+ OS << "\n}";
+
+ OS << "\nStringRef getIncludePathForIndex(unsigned Index) override {\n";
+ OS << "static const char * INCLUDE_PATH_TABLE[] = {\n";
+
+ for (const auto &It : VecIncludeStrings) {
+ OS << "\"" << It << "\",\n";
+ }
+
+ OS << "\n};";
+ OS << "\nreturn StringRef(INCLUDE_PATH_TABLE[Index]);";
+ OS << "\n}";
+}
+
/// EmitMatcher - Emit bytes for the specified matcher and return
/// the number of bytes emitted.
unsigned MatcherTableEmitter::
@@ -537,6 +622,23 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
case Matcher::EmitNode:
case Matcher::MorphNodeTo: {
+ auto NumCoveredBytes = 0;
+ if (InstrumentCoverage) {
+ if (const MorphNodeToMatcher *SNT = dyn_cast<MorphNodeToMatcher>(N)) {
+ NumCoveredBytes = 3;
+ OS << "OPC_Coverage, ";
+ std::string src =
+ GetPatFromTreePatternNode(SNT->getPattern().getSrcPattern());
+ std::string dst =
+ GetPatFromTreePatternNode(SNT->getPattern().getDstPattern());
+ Record *PatRecord = SNT->getPattern().getSrcRecord();
+ std::string include_src = getIncludePath(PatRecord);
+ unsigned Offset =
+ getPatternIdxFromTable(src + " -> " + dst, std::move(include_src));
+ OS << "TARGET_VAL(" << Offset << "),\n";
+ OS.PadToColumn(Indent * 2);
+ }
+ }
const EmitNodeMatcherCommon *EN = cast<EmitNodeMatcherCommon>(N);
OS << (isa<EmitNodeMatcher>(EN) ? "OPC_EmitNode" : "OPC_MorphNodeTo");
bool CompressVTs = EN->getNumVTs() < 3;
@@ -593,10 +695,26 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
} else
OS << '\n';
- return 5 + !CompressVTs + EN->getNumVTs() + NumOperandBytes;
+ return 5 + !CompressVTs + EN->getNumVTs() + NumOperandBytes +
+ NumCoveredBytes;
}
case Matcher::CompleteMatch: {
const CompleteMatchMatcher *CM = cast<CompleteMatchMatcher>(N);
+ auto NumCoveredBytes = 0;
+ if (InstrumentCoverage) {
+ NumCoveredBytes = 3;
+ OS << "OPC_Coverage, ";
+ std::string src =
+ GetPatFromTreePatternNode(CM->getPattern().getSrcPattern());
+ std::string dst =
+ GetPatFromTreePatternNode(CM->getPattern().getDstPattern());
+ Record *PatRecord = CM->getPattern().getSrcRecord();
+ std::string include_src = getIncludePath(PatRecord);
+ unsigned Offset =
+ getPatternIdxFromTable(src + " -> " + dst, std::move(include_src));
+ OS << "TARGET_VAL(" << Offset << "),\n";
+ OS.PadToColumn(Indent * 2);
+ }
OS << "OPC_CompleteMatch, " << CM->getNumResults() << ", ";
unsigned NumResultBytes = 0;
for (unsigned i = 0, e = CM->getNumResults(); i != e; ++i)
@@ -610,7 +728,7 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
<< *CM->getPattern().getDstPattern();
}
OS << '\n';
- return 2 + NumResultBytes;
+ return 2 + NumResultBytes + NumCoveredBytes;
}
}
llvm_unreachable("Unreachable");
@@ -686,8 +804,13 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
++NumOps; // Get the chained node too.
OS << " case " << i << ":\n";
+ if (InstrumentCoverage)
+ OS << " {\n";
OS << " Result.resize(NextRes+" << NumOps << ");\n";
- OS << " return " << P.getSelectFunc();
+ if (InstrumentCoverage)
+ OS << " bool Succeeded = " << P.getSelectFunc();
+ else
+ OS << " return " << P.getSelectFunc();
OS << "(";
// If the complex pattern wants the root of the match, pass it in as the
@@ -704,6 +827,13 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
for (unsigned i = 0; i != NumOps; ++i)
OS << ", Result[NextRes+" << i << "].first";
OS << ");\n";
+ if (InstrumentCoverage) {
+ OS << " if (Succeeded)\n";
+ OS << " dbgs() << \"\\nCOMPLEX_PATTERN: " << P.getSelectFunc()
+ << "\\n\" ;\n";
+ OS << " return Succeeded;\n";
+ OS << " }\n";
+ }
}
OS << " }\n";
OS << "}\n\n";
@@ -827,7 +957,7 @@ void llvm::EmitMatcherTable(const Matcher *TheMatcher,
formatted_raw_ostream OS(O);
OS << "// The main instruction selector code.\n";
- OS << "SDNode *SelectCode(SDNode *N) {\n";
+ OS << "void SelectCode(SDNode *N) {\n";
MatcherTableEmitter MatcherEmitter(CGP);
@@ -842,9 +972,11 @@ void llvm::EmitMatcherTable(const Matcher *TheMatcher,
OS << " #undef TARGET_VAL\n";
OS << " SelectCodeCommon(N, MatcherTable,sizeof(MatcherTable));\n";
- OS << " return nullptr;\n";
OS << "}\n";
// Next up, emit the function for node and pattern predicates:
MatcherEmitter.EmitPredicateFunctions(OS);
+
+ if (InstrumentCoverage)
+ MatcherEmitter.EmitPatternMatchTable(OS);
}
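
Two details of the coverage instrumentation are worth spelling out. An OPC_Coverage record is three bytes (the opcode plus a 16-bit TARGET_VAL index into the new pattern and include-path tables), which is why NumCoveredBytes is 3 and why the byte counts returned by EmitMatcher grow by that amount. Large immediates elsewhere in the table use the VBR scheme hinted at by GetVBRSize: seven payload bits per byte, low bits first, with the high bit marking continuation. A sketch of a matching decoder (decodeVBR is an illustration of the format, not a TableGen function):

    #include <cstdint>

    // Decode the variable-width encoding produced by EmitVBRValue: low 7 bits
    // first, bit 7 set on every byte except the last.
    static uint64_t decodeVBR(const unsigned char *&P) {
      uint64_t Val = 0;
      unsigned Shift = 0;
      unsigned char Byte;
      do {
        Byte = *P++;
        Val |= uint64_t(Byte & 127) << Shift;
        Shift += 7;
      } while (Byte & 128);
      return Val;
    }
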
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index aafc115..d4a56a6 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -848,8 +848,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
HandledReg = II.ImplicitDefs[0];
- for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
- Record *Reg = Pattern.getDstRegs()[i];
+ for (Record *Reg : Pattern.getDstRegs()) {
if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
ResultVTs.push_back(getRegisterValueType(Reg, CGT));
}
@@ -887,7 +886,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
"Node has no result");
- AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName().str(),
+ AddMatcher(new EmitNodeMatcher(II.Namespace.str()+"::"+II.TheDef->getName().str(),
ResultVTs, InstOps,
NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
NodeHasMemRefs, NumFixedArityOperands,
@@ -972,8 +971,7 @@ void MatcherGen::EmitResultCode() {
HandledReg = II.ImplicitDefs[0];
}
- for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
- Record *Reg = Pattern.getDstRegs()[i];
+ for (Record *Reg : Pattern.getDstRegs()) {
if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
++NumSrcResults;
}
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
index 783b35e..0bb6568 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -181,15 +181,21 @@ static Matcher *FindNodeWithKind(Matcher *M, Matcher::KindTy Kind) {
/// ABC
/// XYZ
///
-static void FactorNodes(std::unique_ptr<Matcher> &MatcherPtr) {
- // If we reached the end of the chain, we're done.
- Matcher *N = MatcherPtr.get();
- if (!N) return;
-
- // If this is not a push node, just scan for one.
- ScopeMatcher *Scope = dyn_cast<ScopeMatcher>(N);
- if (!Scope)
- return FactorNodes(N->getNextPtr());
+static void FactorNodes(std::unique_ptr<Matcher> &InputMatcherPtr) {
+ // Look for a push node. Iterates instead of recurses to reduce stack usage.
+ ScopeMatcher *Scope = nullptr;
+ std::unique_ptr<Matcher> *RebindableMatcherPtr = &InputMatcherPtr;
+ while (!Scope) {
+ // If we reached the end of the chain, we're done.
+ Matcher *N = RebindableMatcherPtr->get();
+ if (!N) return;
+
+ // If this is not a push node, just scan for one.
+ Scope = dyn_cast<ScopeMatcher>(N);
+ if (!Scope)
+ RebindableMatcherPtr = &(N->getNextPtr());
+ }
+ std::unique_ptr<Matcher> &MatcherPtr = *RebindableMatcherPtr;
// Okay, pull together the children of the scope node into a vector so we can
// inspect it more easily.
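
The FactorNodes change replaces tail recursion over the matcher chain with a loop that rebinds a pointer to the current unique_ptr slot, so very long chains no longer risk exhausting the stack. The same transformation in miniature (Node and findInteresting are illustrative, not TableGen types):

    #include <memory>

    struct Node {
      std::unique_ptr<Node> Next;
      bool IsInteresting = false;
    };

    // Iterative scan: rebind Slot instead of recursing on Next.
    static Node *findInteresting(std::unique_ptr<Node> &Head) {
      std::unique_ptr<Node> *Slot = &Head;
      while (Node *N = Slot->get()) {
        if (N->IsInteresting)
          return N;
        Slot = &N->Next;
      }
      return nullptr;
    }
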
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
index 43c6a98..25388b7 100644
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -1,4 +1,4 @@
-//===- FastISelEmitter.cpp - Generate an instruction selector -------------===//
+///===- FastISelEmitter.cpp - Generate an instruction selector -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -390,10 +390,10 @@ class FastISelMap {
std::map<OperandsSignature, std::vector<OperandsSignature> >
SignaturesWithConstantForms;
- std::string InstNS;
+ StringRef InstNS;
ImmPredicateSet ImmediatePredicates;
public:
- explicit FastISelMap(std::string InstNS);
+ explicit FastISelMap(StringRef InstNS);
void collectPatterns(CodeGenDAGPatterns &CGP);
void printImmediatePredicates(raw_ostream &OS);
@@ -417,7 +417,7 @@ static std::string getLegalCName(std::string OpName) {
return OpName;
}
-FastISelMap::FastISelMap(std::string instns) : InstNS(std::move(instns)) {}
+FastISelMap::FastISelMap(StringRef instns) : InstNS(instns) {}
static std::string PhyRegForNode(TreePatternNode *Op,
const CodeGenTarget &Target) {
@@ -440,10 +440,6 @@ static std::string PhyRegForNode(TreePatternNode *Op,
void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
const CodeGenTarget &Target = CGP.getTargetInfo();
- // Determine the target's namespace name.
- InstNS = Target.getInstNamespace() + "::";
- assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
-
// Scan through all the patterns and record the simple ones.
for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
E = CGP.ptm_end(); I != E; ++I) {
@@ -640,12 +636,9 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS,
OneHadNoPredicate = true;
} else {
if (OneHadNoPredicate) {
- // FIXME: This should be a PrintError once the x86 target
- // fixes PR21575.
- PrintWarning("Multiple instructions match and one with no "
- "predicate came before one with a predicate! "
- "name:" + Memo.Name + " predicate: " +
- PredicateCheck);
+ PrintFatalError("Multiple instructions match and one with no "
+ "predicate came before one with a predicate! "
+ "name:" + Memo.Name + " predicate: " + PredicateCheck);
}
OS << " if (" + PredicateCheck + ") {\n";
OS << " ";
@@ -662,8 +655,8 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS,
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
- OS << "(" << InstNS << Memo.Name << ", ";
- OS << "&" << InstNS << Memo.RC->getName() << "RegClass";
+ OS << "(" << InstNS << "::" << Memo.Name << ", ";
+ OS << "&" << InstNS << "::" << Memo.RC->getName() << "RegClass";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS, *Memo.PhysRegs);
@@ -876,8 +869,8 @@ void EmitFastISel(RecordKeeper &RK, raw_ostream &OS) {
Target.getName().str() + " target", OS);
// Determine the target's namespace name.
- std::string InstNS = Target.getInstNamespace() + "::";
- assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
+ StringRef InstNS = Target.getInstNamespace();
+ assert(!InstNS.empty() && "Can't determine target-specific namespace!");
FastISelMap F(InstNS);
F.collectPatterns(CGP);
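
With InstNS now a StringRef holding just the namespace (for example "X86") rather than an owned "X86::" string, each use site appends "::" itself and the sanity check becomes a simple !empty(); separately, the multiple-match ordering diagnostic is promoted from a warning to a fatal error. A hypothetical printer showing the resulting output shape (printQualifiedOpcode and the opcode name are illustrative only):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // StringRef is a non-owning (pointer, length) view, so passing it around is
    // cheap; the "::" separator is now emitted explicitly at the use site.
    static void printQualifiedOpcode(raw_ostream &OS, StringRef InstNS,
                                     StringRef Opcode) {
      OS << InstNS << "::" << Opcode; // e.g. "X86::ADD32rr"
    }
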
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
index e1aaecc..03930d7 100644
--- a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -1145,16 +1145,15 @@ bool FilterChooser::emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
if (!Pred->getValue("AssemblerMatcherPredicate"))
continue;
- std::string P = Pred->getValueAsString("AssemblerCondString");
+ StringRef P = Pred->getValueAsString("AssemblerCondString");
- if (!P.length())
+ if (P.empty())
continue;
if (!IsFirstEmission)
o << " && ";
- StringRef SR(P);
- std::pair<StringRef, StringRef> pairs = SR.split(',');
+ std::pair<StringRef, StringRef> pairs = P.split(',');
while (!pairs.second.empty()) {
emitSinglePredicateMatch(o, pairs.first, Emitter->PredicateNamespace);
o << " && ";
@@ -1174,9 +1173,9 @@ bool FilterChooser::doesOpcodeNeedPredicate(unsigned Opc) const {
if (!Pred->getValue("AssemblerMatcherPredicate"))
continue;
- std::string P = Pred->getValueAsString("AssemblerCondString");
+ StringRef P = Pred->getValueAsString("AssemblerCondString");
- if (!P.length())
+ if (P.empty())
continue;
return true;
@@ -1692,9 +1691,7 @@ void FilterChooser::emitTableEntries(DecoderTableInfo &TableInfo) const {
dumpStack(errs(), "\t\t");
for (unsigned i = 0; i < Opcodes.size(); ++i) {
- const std::string &Name = nameWithID(Opcodes[i]);
-
- errs() << '\t' << Name << " ";
+ errs() << '\t' << nameWithID(Opcodes[i]) << " ";
dumpBits(errs(),
getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
errs() << '\n';
@@ -1744,7 +1741,7 @@ static bool populateInstruction(CodeGenTarget &Target,
// If the instruction has specified a custom decoding hook, use that instead
// of trying to auto-generate the decoder.
- std::string InstDecoder = Def.getValueAsString("DecoderMethod");
+ StringRef InstDecoder = Def.getValueAsString("DecoderMethod");
if (InstDecoder != "") {
bool HasCompleteInstDecoder = Def.getValueAsBit("hasCompleteDecoder");
InsnOperands.push_back(OperandInfo(InstDecoder, HasCompleteInstDecoder));
@@ -2261,7 +2258,7 @@ void FixedLenDecoderEmitter::run(raw_ostream &o) {
Def->getValueAsBit("isCodeGenOnly"))
continue;
- std::string DecoderNamespace = Def->getValueAsString("DecoderNamespace");
+ StringRef DecoderNamespace = Def->getValueAsString("DecoderNamespace");
if (Size) {
if (populateInstruction(Target, *Inst, i, Operands)) {
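
AssemblerCondString holds a comma-separated list of predicate names, and the loop above peels them off with StringRef::split(','), which returns the text before and after the first comma without allocating. The idiom in isolation (forEachCond is illustrative, not part of the emitter):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/StringRef.h"
    using namespace llvm;

    // Walk a comma-separated list without creating temporary std::strings.
    static void forEachCond(StringRef P, function_ref<void(StringRef)> Fn) {
      while (!P.empty()) {
        std::pair<StringRef, StringRef> Parts = P.split(',');
        Fn(Parts.first);
        P = Parts.second;
      }
    }
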
diff --git a/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp b/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 2bc6181..cafcbeb 100644
--- a/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -31,326 +31,1979 @@
//===----------------------------------------------------------------------===//
#include "CodeGenDAGPatterns.h"
+#include "SubtargetFeatureInfo.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <string>
+#include <numeric>
using namespace llvm;
#define DEBUG_TYPE "gisel-emitter"
STATISTIC(NumPatternTotal, "Total number of patterns");
-STATISTIC(NumPatternSkipped, "Number of patterns skipped");
+STATISTIC(NumPatternImported, "Number of patterns imported from SelectionDAG");
+STATISTIC(NumPatternImportsSkipped, "Number of SelectionDAG imports skipped");
STATISTIC(NumPatternEmitted, "Number of patterns emitted");
+/// A unique identifier for a MatchTable.
+static unsigned CurrentMatchTableID = 0;
+
+cl::OptionCategory GlobalISelEmitterCat("Options for -gen-global-isel");
static cl::opt<bool> WarnOnSkippedPatterns(
"warn-on-skipped-patterns",
cl::desc("Explain why a pattern was skipped for inclusion "
"in the GlobalISel selector"),
- cl::init(false));
+ cl::init(false), cl::cat(GlobalISelEmitterCat));
namespace {
+//===- Helper functions ---------------------------------------------------===//
-class GlobalISelEmitter {
-public:
- explicit GlobalISelEmitter(RecordKeeper &RK);
- void run(raw_ostream &OS);
-
+/// This class stands in for LLT wherever we want to tablegen-erate an
+/// equivalent at compiler run-time.
+class LLTCodeGen {
private:
- const RecordKeeper &RK;
- const CodeGenDAGPatterns CGP;
- const CodeGenTarget &Target;
+ LLT Ty;
- /// Keep track of the equivalence between SDNodes and Instruction.
- /// This is defined using 'GINodeEquiv' in the target description.
- DenseMap<Record *, const CodeGenInstruction *> NodeEquivs;
+public:
+ LLTCodeGen(const LLT &Ty) : Ty(Ty) {}
- void gatherNodeEquivs();
- const CodeGenInstruction *findNodeEquiv(Record *N);
+ void emitCxxEnumValue(raw_ostream &OS) const {
+ if (Ty.isScalar()) {
+ OS << "GILLT_s" << Ty.getSizeInBits();
+ return;
+ }
+ if (Ty.isVector()) {
+ OS << "GILLT_v" << Ty.getNumElements() << "s" << Ty.getScalarSizeInBits();
+ return;
+ }
+ llvm_unreachable("Unhandled LLT");
+ }
- struct SkipReason {
- std::string Reason;
- };
+ void emitCxxConstructorCall(raw_ostream &OS) const {
+ if (Ty.isScalar()) {
+ OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
+ return;
+ }
+ if (Ty.isVector()) {
+ OS << "LLT::vector(" << Ty.getNumElements() << ", "
+ << Ty.getScalarSizeInBits() << ")";
+ return;
+ }
+ llvm_unreachable("Unhandled LLT");
+ }
- /// Analyze pattern \p P, possibly emitting matching code for it to \p OS.
- /// Otherwise, return a reason why this pattern was skipped for emission.
- Optional<SkipReason> runOnPattern(const PatternToMatch &P,
- raw_ostream &OS);
+ const LLT &get() const { return Ty; }
+
+ /// This ordering is used for std::unique() and std::sort(). There's no
+ /// particular logic behind the order.
+ bool operator<(const LLTCodeGen &Other) const {
+ if (!Ty.isValid())
+ return Other.Ty.isValid();
+ if (Ty.isScalar()) {
+ if (!Other.Ty.isValid())
+ return false;
+ if (Other.Ty.isScalar())
+ return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
+ return false;
+ }
+ if (Ty.isVector()) {
+ if (!Other.Ty.isValid() || Other.Ty.isScalar())
+ return false;
+ if (Other.Ty.isVector()) {
+ if (Ty.getNumElements() < Other.Ty.getNumElements())
+ return true;
+ if (Ty.getNumElements() > Other.Ty.getNumElements())
+ return false;
+ return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
+ }
+ return false;
+ }
+ llvm_unreachable("Unhandled LLT");
+ }
};
-} // end anonymous namespace
-
-//===- Helper functions ---------------------------------------------------===//
-
+class InstructionMatcher;
/// Convert an MVT to an equivalent LLT if possible, or the invalid LLT() for
/// MVTs that don't map cleanly to an LLT (e.g., iPTR, *any, ...).
-static Optional<std::string> MVTToLLT(MVT::SimpleValueType SVT) {
- std::string TyStr;
- raw_string_ostream OS(TyStr);
+static Optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
MVT VT(SVT);
- if (VT.isVector() && VT.getVectorNumElements() != 1) {
- OS << "LLT::vector(" << VT.getVectorNumElements() << ", "
- << VT.getScalarSizeInBits() << ")";
- } else if (VT.isInteger() || VT.isFloatingPoint()) {
- OS << "LLT::scalar(" << VT.getSizeInBits() << ")";
- } else {
- return None;
+ if (VT.isVector() && VT.getVectorNumElements() != 1)
+ return LLTCodeGen(
+ LLT::vector(VT.getVectorNumElements(), VT.getScalarSizeInBits()));
+ if (VT.isInteger() || VT.isFloatingPoint())
+ return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+ return None;
+}
+
+static std::string explainPredicates(const TreePatternNode *N) {
+ std::string Explanation = "";
+ StringRef Separator = "";
+ for (const auto &P : N->getPredicateFns()) {
+ Explanation +=
+ (Separator + P.getOrigPatFragRecord()->getRecord()->getName()).str();
+ if (P.isAlwaysTrue())
+ Explanation += " always-true";
+ if (P.isImmediatePattern())
+ Explanation += " immediate";
}
- OS.flush();
- return TyStr;
+ return Explanation;
+}
+
+std::string explainOperator(Record *Operator) {
+ if (Operator->isSubClassOf("SDNode"))
+ return (" (" + Operator->getValueAsString("Opcode") + ")").str();
+
+ if (Operator->isSubClassOf("Intrinsic"))
+ return (" (Operator is an Intrinsic, " + Operator->getName() + ")").str();
+
+ return " (Operator not understood)";
}
-static bool isTrivialOperatorNode(const TreePatternNode *N) {
- return !N->isLeaf() && !N->hasAnyPredicate() && !N->getTransformFn();
+/// Helper function to let the emitter report skip reason error messages.
+static Error failedImport(const Twine &Reason) {
+ return make_error<StringError>(Reason, inconvertibleErrorCode());
}
+static Error isTrivialOperatorNode(const TreePatternNode *N) {
+ std::string Explanation = "";
+ std::string Separator = "";
+ if (N->isLeaf()) {
+ if (isa<IntInit>(N->getLeafValue()))
+ return Error::success();
+
+ Explanation = "Is a leaf";
+ Separator = ", ";
+ }
+
+ if (N->hasAnyPredicate()) {
+ Explanation = Separator + "Has a predicate (" + explainPredicates(N) + ")";
+ Separator = ", ";
+ }
+
+ if (N->getTransformFn()) {
+ Explanation += Separator + "Has a transform function";
+ Separator = ", ";
+ }
+
+ if (!N->isLeaf() && !N->hasAnyPredicate() && !N->getTransformFn())
+ return Error::success();
+
+ return failedImport(Explanation);
+}
+
+static Record *getInitValueAsRegClass(Init *V) {
+ if (DefInit *VDefInit = dyn_cast<DefInit>(V)) {
+ if (VDefInit->getDef()->isSubClassOf("RegisterOperand"))
+ return VDefInit->getDef()->getValueAsDef("RegClass");
+ if (VDefInit->getDef()->isSubClassOf("RegisterClass"))
+ return VDefInit->getDef();
+ }
+ return nullptr;
+}
+
+std::string
+getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
+ std::string Name = "GIFBS";
+ for (const auto &Feature : FeatureBitset)
+ Name += ("_" + Feature->getName()).str();
+ return Name;
+}
//===- Matchers -----------------------------------------------------------===//
-struct Matcher {
- virtual ~Matcher() {}
- virtual void emit(raw_ostream &OS) const = 0;
+class OperandMatcher;
+class MatchAction;
+
+/// Generates code to check that a match rule matches.
+class RuleMatcher {
+ /// A list of matchers that all need to succeed for the current rule to match.
+ /// FIXME: This currently supports a single match position but could be
+ /// extended to support multiple positions to support div/rem fusion or
+ /// load-multiple instructions.
+ std::vector<std::unique_ptr<InstructionMatcher>> Matchers;
+
+ /// A list of actions that need to be taken when all predicates in this rule
+ /// have succeeded.
+ std::vector<std::unique_ptr<MatchAction>> Actions;
+
+ /// A map of instruction matchers to the local variables created by
+ /// emitCaptureOpcodes().
+ std::map<const InstructionMatcher *, unsigned> InsnVariableIDs;
+
+ /// ID for the next instruction variable defined with defineInsnVar()
+ unsigned NextInsnVarID;
+
+ std::vector<Record *> RequiredFeatures;
+
+public:
+ RuleMatcher()
+ : Matchers(), Actions(), InsnVariableIDs(), NextInsnVarID(0) {}
+ RuleMatcher(RuleMatcher &&Other) = default;
+ RuleMatcher &operator=(RuleMatcher &&Other) = default;
+
+ InstructionMatcher &addInstructionMatcher();
+ void addRequiredFeature(Record *Feature);
+ const std::vector<Record *> &getRequiredFeatures() const;
+
+ template <class Kind, class... Args> Kind &addAction(Args &&... args);
+
+ /// Define an instruction without emitting any code to do so.
+ /// This is used for the root of the match.
+ unsigned implicitlyDefineInsnVar(const InstructionMatcher &Matcher);
+ /// Define an instruction and emit corresponding state-machine opcodes.
+ unsigned defineInsnVar(raw_ostream &OS, const InstructionMatcher &Matcher,
+ unsigned InsnVarID, unsigned OpIdx);
+ unsigned getInsnVarID(const InstructionMatcher &InsnMatcher) const;
+
+ void emitCaptureOpcodes(raw_ostream &OS);
+
+ void emit(raw_ostream &OS);
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool isHigherPriorityThan(const RuleMatcher &B) const;
+
+ /// Report the maximum number of temporary operands needed by the rule
+ /// matcher.
+ unsigned countRendererFns() const;
+
+ // FIXME: Remove this as soon as possible
+ InstructionMatcher &insnmatcher_front() const { return *Matchers.front(); }
};
-raw_ostream &operator<<(raw_ostream &S, const Matcher &M) {
- M.emit(S);
- return S;
-}
+template <class PredicateTy> class PredicateListMatcher {
+private:
+ typedef std::vector<std::unique_ptr<PredicateTy>> PredicateVec;
+ PredicateVec Predicates;
-struct MatchAction {
- virtual ~MatchAction() {}
- virtual void emit(raw_ostream &OS) const = 0;
+public:
+ /// Construct a new operand predicate and add it to the matcher.
+ template <class Kind, class... Args>
+ Kind &addPredicate(Args&&... args) {
+ Predicates.emplace_back(
+ llvm::make_unique<Kind>(std::forward<Args>(args)...));
+ return *static_cast<Kind *>(Predicates.back().get());
+ }
+
+ typename PredicateVec::const_iterator predicates_begin() const {
+ return Predicates.begin();
+ }
+ typename PredicateVec::const_iterator predicates_end() const {
+ return Predicates.end();
+ }
+ iterator_range<typename PredicateVec::const_iterator> predicates() const {
+ return make_range(predicates_begin(), predicates_end());
+ }
+ typename PredicateVec::size_type predicates_size() const {
+ return Predicates.size();
+ }
+
+ /// Emit MatchTable opcodes that tests whether all the predicates are met.
+ template <class... Args>
+ void emitPredicateListOpcodes(raw_ostream &OS, Args &&... args) const {
+ if (Predicates.empty()) {
+ OS << "// No predicates\n";
+ return;
+ }
+
+ for (const auto &Predicate : predicates())
+ Predicate->emitPredicateOpcodes(OS, std::forward<Args>(args)...);
+ }
};
-raw_ostream &operator<<(raw_ostream &S, const MatchAction &A) {
- A.emit(S);
- return S;
-}
+/// Generates code to check a predicate of an operand.
+///
+/// Typical predicates include:
+/// * Operand is a particular register.
+/// * Operand is assigned a particular register bank.
+/// * Operand is an MBB.
+class OperandPredicateMatcher {
+public:
+ /// This enum is used for RTTI and also defines the priority that is given to
+ /// the predicate when generating the matcher code. Kinds with higher priority
+ /// must be tested first.
+ ///
+ /// The relative priority of OPM_LLT, OPM_RegBank, and OPM_MBB do not matter
+ /// but OPM_Int must have priority over OPM_RegBank since constant integers
+ /// are represented by a virtual register defined by a G_CONSTANT instruction.
+ enum PredicateKind {
+ OPM_ComplexPattern,
+ OPM_Instruction,
+ OPM_IntrinsicID,
+ OPM_Int,
+ OPM_LiteralInt,
+ OPM_LLT,
+ OPM_RegBank,
+ OPM_MBB,
+ };
-struct MatchOpcode : public Matcher {
- MatchOpcode(const CodeGenInstruction *I) : I(I) {}
- const CodeGenInstruction *I;
+protected:
+ PredicateKind Kind;
- virtual void emit(raw_ostream &OS) const {
- OS << "I.getOpcode() == " << I->Namespace << "::" << I->TheDef->getName();
+public:
+ OperandPredicateMatcher(PredicateKind Kind) : Kind(Kind) {}
+ virtual ~OperandPredicateMatcher() {}
+
+ PredicateKind getKind() const { return Kind; }
+
+ /// Return the OperandMatcher for the specified operand or nullptr if there
+ /// isn't one by that name in this operand predicate matcher.
+ ///
+ /// InstructionOperandMatcher is the only subclass that can return non-null
+ /// for this.
+ virtual Optional<const OperandMatcher *>
+ getOptionalOperand(StringRef SymbolicName) const {
+ assert(!SymbolicName.empty() && "Cannot lookup unnamed operand");
+ return None;
}
+
+ /// Emit MatchTable opcodes to capture instructions into the MIs table.
+ ///
+ /// Only InstructionOperandMatcher needs to do anything for this method the
+ /// rest just walk the tree.
+ virtual void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const {}
+
+ /// Emit MatchTable opcodes that check the predicate for the given operand.
+ virtual void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID,
+ unsigned OpIdx) const = 0;
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ virtual bool isHigherPriorityThan(const OperandPredicateMatcher &B) const {
+ return Kind < B.Kind;
+ };
+
+ /// Report the maximum number of temporary operands needed by the predicate
+ /// matcher.
+ virtual unsigned countRendererFns() const { return 0; }
};
-struct MatchRegOpType : public Matcher {
- MatchRegOpType(unsigned OpIdx, std::string Ty)
- : OpIdx(OpIdx), Ty(Ty) {}
- unsigned OpIdx;
- std::string Ty;
+/// Generates code to check that an operand is a particular LLT.
+class LLTOperandMatcher : public OperandPredicateMatcher {
+protected:
+ LLTCodeGen Ty;
- virtual void emit(raw_ostream &OS) const {
- OS << "MRI.getType(I.getOperand(" << OpIdx << ").getReg()) == (" << Ty
- << ")";
+public:
+ LLTOperandMatcher(const LLTCodeGen &Ty)
+ : OperandPredicateMatcher(OPM_LLT), Ty(Ty) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_LLT;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckType, /*MI*/" << InsnVarID << ", /*Op*/" << OpIdx
+ << ", /*Type*/";
+ Ty.emitCxxEnumValue(OS);
+ OS << ", \n";
}
};
-struct MatchRegOpBank : public Matcher {
- MatchRegOpBank(unsigned OpIdx, const CodeGenRegisterClass &RC)
- : OpIdx(OpIdx), RC(RC) {}
- unsigned OpIdx;
+/// Generates code to check that an operand is a particular target constant.
+class ComplexPatternOperandMatcher : public OperandPredicateMatcher {
+protected:
+ const OperandMatcher &Operand;
+ const Record &TheDef;
+
+ unsigned getAllocatedTemporariesBaseID() const;
+
+public:
+ ComplexPatternOperandMatcher(const OperandMatcher &Operand,
+ const Record &TheDef)
+ : OperandPredicateMatcher(OPM_ComplexPattern), Operand(Operand),
+ TheDef(TheDef) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_ComplexPattern;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ unsigned ID = getAllocatedTemporariesBaseID();
+ OS << " GIM_CheckComplexPattern, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", /*Renderer*/" << ID << ", GICP_"
+ << TheDef.getName() << ",\n";
+ }
+
+ unsigned countRendererFns() const override {
+ return 1;
+ }
+};
+
+/// Generates code to check that an operand is in a particular register bank.
+class RegisterBankOperandMatcher : public OperandPredicateMatcher {
+protected:
const CodeGenRegisterClass &RC;
- virtual void emit(raw_ostream &OS) const {
- OS << "(&RBI.getRegBankFromRegClass(" << RC.getQualifiedName()
- << "RegClass) == RBI.getRegBank(I.getOperand(" << OpIdx
- << ").getReg(), MRI, TRI))";
+public:
+ RegisterBankOperandMatcher(const CodeGenRegisterClass &RC)
+ : OperandPredicateMatcher(OPM_RegBank), RC(RC) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_RegBank;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckRegBankForClass, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", /*RC*/" << RC.getQualifiedName() << "RegClassID,\n";
+ }
+};
+
+/// Generates code to check that an operand is a basic block.
+class MBBOperandMatcher : public OperandPredicateMatcher {
+public:
+ MBBOperandMatcher() : OperandPredicateMatcher(OPM_MBB) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_MBB;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckIsMBB, /*MI*/" << InsnVarID << ", /*Op*/" << OpIdx << ",\n";
+ }
+};
+
+/// Generates code to check that an operand is a G_CONSTANT with a particular
+/// int.
+class ConstantIntOperandMatcher : public OperandPredicateMatcher {
+protected:
+ int64_t Value;
+
+public:
+ ConstantIntOperandMatcher(int64_t Value)
+ : OperandPredicateMatcher(OPM_Int), Value(Value) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_Int;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckConstantInt, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", " << Value << ",\n";
+ }
+};
+
+/// Generates code to check that an operand is a raw int (where MO.isImm() or
+/// MO.isCImm() is true).
+class LiteralIntOperandMatcher : public OperandPredicateMatcher {
+protected:
+ int64_t Value;
+
+public:
+ LiteralIntOperandMatcher(int64_t Value)
+ : OperandPredicateMatcher(OPM_LiteralInt), Value(Value) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_LiteralInt;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckLiteralInt, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", " << Value << ",\n";
+ }
+};
+
+/// Generates code to check that an operand is an intrinsic ID.
+class IntrinsicIDOperandMatcher : public OperandPredicateMatcher {
+protected:
+ const CodeGenIntrinsic *II;
+
+public:
+ IntrinsicIDOperandMatcher(const CodeGenIntrinsic *II)
+ : OperandPredicateMatcher(OPM_IntrinsicID), II(II) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_IntrinsicID;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID, unsigned OpIdx) const override {
+ OS << " GIM_CheckIntrinsicID, /*MI*/" << InsnVarID << ", /*Op*/"
+ << OpIdx << ", Intrinsic::" << II->EnumName << ",\n";
}
};
-struct MatchMBBOp : public Matcher {
- MatchMBBOp(unsigned OpIdx) : OpIdx(OpIdx) {}
+/// Generates code to check that a set of predicates match for a particular
+/// operand.
+class OperandMatcher : public PredicateListMatcher<OperandPredicateMatcher> {
+protected:
+ InstructionMatcher &Insn;
unsigned OpIdx;
+ std::string SymbolicName;
+
+ /// The index of the first temporary variable allocated to this operand. The
+ /// number of allocated temporaries can be found with
+ /// countRendererFns().
+ unsigned AllocatedTemporariesBaseID;
+
+public:
+ OperandMatcher(InstructionMatcher &Insn, unsigned OpIdx,
+ const std::string &SymbolicName,
+ unsigned AllocatedTemporariesBaseID)
+ : Insn(Insn), OpIdx(OpIdx), SymbolicName(SymbolicName),
+ AllocatedTemporariesBaseID(AllocatedTemporariesBaseID) {}
+
+ bool hasSymbolicName() const { return !SymbolicName.empty(); }
+ const StringRef getSymbolicName() const { return SymbolicName; }
+ void setSymbolicName(StringRef Name) {
+ assert(SymbolicName.empty() && "Operand already has a symbolic name");
+ SymbolicName = Name;
+ }
+ unsigned getOperandIndex() const { return OpIdx; }
+
+ std::string getOperandExpr(unsigned InsnVarID) const {
+ return "State.MIs[" + llvm::to_string(InsnVarID) + "]->getOperand(" +
+ llvm::to_string(OpIdx) + ")";
+ }
+
+ Optional<const OperandMatcher *>
+ getOptionalOperand(StringRef DesiredSymbolicName) const {
+ assert(!DesiredSymbolicName.empty() && "Cannot lookup unnamed operand");
+ if (DesiredSymbolicName == SymbolicName)
+ return this;
+ for (const auto &OP : predicates()) {
+ const auto &MaybeOperand = OP->getOptionalOperand(DesiredSymbolicName);
+ if (MaybeOperand.hasValue())
+ return MaybeOperand.getValue();
+ }
+ return None;
+ }
+
+ InstructionMatcher &getInstructionMatcher() const { return Insn; }
+
+ /// Emit MatchTable opcodes to capture instructions into the MIs table.
+ void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const {
+ for (const auto &Predicate : predicates())
+ Predicate->emitCaptureOpcodes(OS, Rule, InsnVarID, OpIdx);
+ }
- virtual void emit(raw_ostream &OS) const {
- OS << "I.getOperand(" << OpIdx << ").isMBB()";
+ /// Emit MatchTable opcodes that test whether the instruction named in
+ /// InsnVarID matches all the predicates and all the operands.
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const {
+ OS << " // MIs[" << InsnVarID << "] ";
+ if (SymbolicName.empty())
+ OS << "Operand " << OpIdx;
+ else
+ OS << SymbolicName;
+ OS << "\n";
+ emitPredicateListOpcodes(OS, Rule, InsnVarID, OpIdx);
+ }
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool isHigherPriorityThan(const OperandMatcher &B) const {
+ // Operand matchers involving more predicates have higher priority.
+ if (predicates_size() > B.predicates_size())
+ return true;
+ if (predicates_size() < B.predicates_size())
+ return false;
+
+ // This assumes that predicates are added in a consistent order.
+ for (const auto &Predicate : zip(predicates(), B.predicates())) {
+ if (std::get<0>(Predicate)->isHigherPriorityThan(*std::get<1>(Predicate)))
+ return true;
+ if (std::get<1>(Predicate)->isHigherPriorityThan(*std::get<0>(Predicate)))
+ return false;
+ }
+
+ return false;
+ };
+
+ /// Report the maximum number of temporary operands needed by the operand
+ /// matcher.
+ unsigned countRendererFns() const {
+ return std::accumulate(
+ predicates().begin(), predicates().end(), 0,
+ [](unsigned A,
+ const std::unique_ptr<OperandPredicateMatcher> &Predicate) {
+ return A + Predicate->countRendererFns();
+ });
+ }
+
+ unsigned getAllocatedTemporariesBaseID() const {
+ return AllocatedTemporariesBaseID;
}
};
-struct MutateOpcode : public MatchAction {
- MutateOpcode(const CodeGenInstruction *I) : I(I) {}
+unsigned ComplexPatternOperandMatcher::getAllocatedTemporariesBaseID() const {
+ return Operand.getAllocatedTemporariesBaseID();
+}
+
+/// Generates code to check a predicate on an instruction.
+///
+/// Typical predicates include:
+/// * The opcode of the instruction is a particular value.
+/// * The nsw/nuw flag is/isn't set.
+class InstructionPredicateMatcher {
+protected:
+ /// This enum is used for RTTI and also defines the priority that is given to
+ /// the predicate when generating the matcher code. Kinds with higher priority
+ /// must be tested first.
+ enum PredicateKind {
+ IPM_Opcode,
+ };
+
+ PredicateKind Kind;
+
+public:
+ InstructionPredicateMatcher(PredicateKind Kind) : Kind(Kind) {}
+ virtual ~InstructionPredicateMatcher() {}
+
+ PredicateKind getKind() const { return Kind; }
+
+ /// Emit MatchTable opcodes that test whether the instruction named in
+ /// InsnVarID matches the predicate.
+ virtual void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const = 0;
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ virtual bool
+ isHigherPriorityThan(const InstructionPredicateMatcher &B) const {
+ return Kind < B.Kind;
+ };
+
+ /// Report the maximum number of temporary operands needed by the predicate
+ /// matcher.
+ virtual unsigned countRendererFns() const { return 0; }
+};
+
+/// Generates code to check the opcode of an instruction.
+class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
+protected:
const CodeGenInstruction *I;
- virtual void emit(raw_ostream &OS) const {
- OS << "I.setDesc(TII.get(" << I->Namespace << "::" << I->TheDef->getName()
- << "));";
+public:
+ InstructionOpcodeMatcher(const CodeGenInstruction *I)
+ : InstructionPredicateMatcher(IPM_Opcode), I(I) {}
+
+ static bool classof(const InstructionPredicateMatcher *P) {
+ return P->getKind() == IPM_Opcode;
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const override {
+ OS << " GIM_CheckOpcode, /*MI*/" << InsnVarID << ", " << I->Namespace
+ << "::" << I->TheDef->getName() << ",\n";
+ }
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool
+ isHigherPriorityThan(const InstructionPredicateMatcher &B) const override {
+ if (InstructionPredicateMatcher::isHigherPriorityThan(B))
+ return true;
+ if (B.InstructionPredicateMatcher::isHigherPriorityThan(*this))
+ return false;
+
+ // Prioritize opcodes for cosmetic reasons in the generated source. Although
+ // this is cosmetic at the moment, we may want to drive a similar ordering
+ // using instruction frequency information to improve compile time.
+ if (const InstructionOpcodeMatcher *BO =
+ dyn_cast<InstructionOpcodeMatcher>(&B))
+ return I->TheDef->getName() < BO->I->TheDef->getName();
+
+ return false;
+ };
+};
+
+/// Generates code to check that a set of predicates and operands match for a
+/// particular instruction.
+///
+/// Typical predicates include:
+/// * Has a specific opcode.
+/// * Has an nsw/nuw flag or doesn't.
+class InstructionMatcher
+ : public PredicateListMatcher<InstructionPredicateMatcher> {
+protected:
+ typedef std::vector<std::unique_ptr<OperandMatcher>> OperandVec;
+
+ /// The operands to match. All rendered operands must be present even if the
+ /// condition is always true.
+ OperandVec Operands;
+
+public:
+ /// Add an operand to the matcher.
+ OperandMatcher &addOperand(unsigned OpIdx, const std::string &SymbolicName,
+ unsigned AllocatedTemporariesBaseID) {
+ Operands.emplace_back(new OperandMatcher(*this, OpIdx, SymbolicName,
+ AllocatedTemporariesBaseID));
+ return *Operands.back();
+ }
+
+ OperandMatcher &getOperand(unsigned OpIdx) {
+ auto I = std::find_if(Operands.begin(), Operands.end(),
+ [&OpIdx](const std::unique_ptr<OperandMatcher> &X) {
+ return X->getOperandIndex() == OpIdx;
+ });
+ if (I != Operands.end())
+ return **I;
+ llvm_unreachable("Failed to lookup operand");
+ }
+
+ Optional<const OperandMatcher *>
+ getOptionalOperand(StringRef SymbolicName) const {
+ assert(!SymbolicName.empty() && "Cannot lookup unnamed operand");
+ for (const auto &Operand : Operands) {
+ const auto &OM = Operand->getOptionalOperand(SymbolicName);
+ if (OM.hasValue())
+ return OM.getValue();
+ }
+ return None;
+ }
+
+ const OperandMatcher &getOperand(StringRef SymbolicName) const {
+ Optional<const OperandMatcher *> OM = getOptionalOperand(SymbolicName);
+ if (OM.hasValue())
+ return *OM.getValue();
+ llvm_unreachable("Failed to lookup operand");
+ }
+
+ unsigned getNumOperands() const { return Operands.size(); }
+ OperandVec::iterator operands_begin() { return Operands.begin(); }
+ OperandVec::iterator operands_end() { return Operands.end(); }
+ iterator_range<OperandVec::iterator> operands() {
+ return make_range(operands_begin(), operands_end());
+ }
+ OperandVec::const_iterator operands_begin() const { return Operands.begin(); }
+ OperandVec::const_iterator operands_end() const { return Operands.end(); }
+ iterator_range<OperandVec::const_iterator> operands() const {
+ return make_range(operands_begin(), operands_end());
+ }
+
+ /// Emit MatchTable opcodes to check the shape of the match and capture
+ /// instructions into the MIs table.
+ void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnID) {
+ OS << " GIM_CheckNumOperands, /*MI*/" << InsnID << ", /*Expected*/"
+ << getNumOperands() << ",\n";
+ for (const auto &Operand : Operands)
+ Operand->emitCaptureOpcodes(OS, Rule, InsnID);
+ }
+
+ /// Emit MatchTable opcodes that test whether the instruction named in
+ /// InsnVarName matches all the predicates and all the operands.
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID) const {
+ emitPredicateListOpcodes(OS, Rule, InsnVarID);
+ for (const auto &Operand : Operands)
+ Operand->emitPredicateOpcodes(OS, Rule, InsnVarID);
+ }
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool isHigherPriorityThan(const InstructionMatcher &B) const {
+ // Instruction matchers involving more operands have higher priority.
+ if (Operands.size() > B.Operands.size())
+ return true;
+ if (Operands.size() < B.Operands.size())
+ return false;
+
+ for (const auto &Predicate : zip(predicates(), B.predicates())) {
+ if (std::get<0>(Predicate)->isHigherPriorityThan(*std::get<1>(Predicate)))
+ return true;
+ if (std::get<1>(Predicate)->isHigherPriorityThan(*std::get<0>(Predicate)))
+ return false;
+ }
+
+ for (const auto &Operand : zip(Operands, B.Operands)) {
+ if (std::get<0>(Operand)->isHigherPriorityThan(*std::get<1>(Operand)))
+ return true;
+ if (std::get<1>(Operand)->isHigherPriorityThan(*std::get<0>(Operand)))
+ return false;
+ }
+
+ return false;
+ };
+
+ /// Report the maximum number of temporary operands needed by the instruction
+ /// matcher.
+ unsigned countRendererFns() const {
+ return std::accumulate(predicates().begin(), predicates().end(), 0,
+ [](unsigned A,
+ const std::unique_ptr<InstructionPredicateMatcher>
+ &Predicate) {
+ return A + Predicate->countRendererFns();
+ }) +
+ std::accumulate(
+ Operands.begin(), Operands.end(), 0,
+ [](unsigned A, const std::unique_ptr<OperandMatcher> &Operand) {
+ return A + Operand->countRendererFns();
+ });
}
};
-class MatcherEmitter {
+/// Generates code to check that the operand is a register defined by an
+/// instruction that matches the given instruction matcher.
+///
+/// For example, the pattern:
+/// (set $dst, (G_MUL (G_ADD $src1, $src2), $src3))
+/// would use an InstructionOperandMatcher for operand 1 of the G_MUL to match
+/// the:
+/// (G_ADD $src1, $src2)
+/// subpattern.
+class InstructionOperandMatcher : public OperandPredicateMatcher {
+protected:
+ std::unique_ptr<InstructionMatcher> InsnMatcher;
+
+public:
+ InstructionOperandMatcher()
+ : OperandPredicateMatcher(OPM_Instruction),
+ InsnMatcher(new InstructionMatcher()) {}
+
+ static bool classof(const OperandPredicateMatcher *P) {
+ return P->getKind() == OPM_Instruction;
+ }
+
+ InstructionMatcher &getInsnMatcher() const { return *InsnMatcher; }
+
+ Optional<const OperandMatcher *>
+ getOptionalOperand(StringRef SymbolicName) const override {
+ assert(!SymbolicName.empty() && "Cannot lookup unnamed operand");
+ return InsnMatcher->getOptionalOperand(SymbolicName);
+ }
+
+ void emitCaptureOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnID, unsigned OpIdx) const override {
+ unsigned InsnVarID = Rule.defineInsnVar(OS, *InsnMatcher, InsnID, OpIdx);
+ InsnMatcher->emitCaptureOpcodes(OS, Rule, InsnVarID);
+ }
+
+ void emitPredicateOpcodes(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned InsnVarID_,
+ unsigned OpIdx_) const override {
+ unsigned InsnVarID = Rule.getInsnVarID(*InsnMatcher);
+ InsnMatcher->emitPredicateOpcodes(OS, Rule, InsnVarID);
+ }
+};
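
(Illustrative sketch for the (G_MUL (G_ADD $src1, $src2), $src3) example above; operand indices are assumed, not taken from a real target. The nested instruction is first captured into its own MIs slot, and its opcode check is emitted later in the predicate phase.)

    // Capture phase (emitCaptureOpcodes):
    GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/1, // MIs[1]
    GIM_CheckNumOperands, /*MI*/1, /*Expected*/3,
    // Predicate phase (emitPredicateOpcodes), emitted later:
    GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_ADD,
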
+
+//===- Actions ------------------------------------------------------------===//
+class OperandRenderer {
+public:
+ enum RendererKind {
+ OR_Copy,
+ OR_CopySubReg,
+ OR_Imm,
+ OR_Register,
+ OR_ComplexPattern
+ };
+
+protected:
+ RendererKind Kind;
+
+public:
+ OperandRenderer(RendererKind Kind) : Kind(Kind) {}
+ virtual ~OperandRenderer() {}
+
+ RendererKind getKind() const { return Kind; }
+
+ virtual void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const = 0;
+};
+
+/// A CopyRenderer emits code to copy a single operand from an existing
+/// instruction to the one being built.
+class CopyRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ /// The matcher for the instruction that this operand is copied from.
+ /// This provides the facility for looking up an operand by its name so
+ /// that it can be used as a source for the instruction being built.
+ const InstructionMatcher &Matched;
+ /// The name of the operand.
+ const StringRef SymbolicName;
+
+public:
+ CopyRenderer(unsigned NewInsnID, const InstructionMatcher &Matched,
+ StringRef SymbolicName)
+ : OperandRenderer(OR_Copy), NewInsnID(NewInsnID), Matched(Matched),
+ SymbolicName(SymbolicName) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_Copy;
+ }
+
+ const StringRef getSymbolicName() const { return SymbolicName; }
+
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ const OperandMatcher &Operand = Matched.getOperand(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ OS << " GIR_Copy, /*NewInsnID*/" << NewInsnID << ", /*OldInsnID*/"
+ << OldInsnVarID << ", /*OpIdx*/" << Operand.getOperandIndex() << ", // "
+ << SymbolicName << "\n";
+ }
+};
+
+/// A CopySubRegRenderer emits code to copy a single register operand from an
+/// existing instruction to the one being built and indicate that only a
+/// subregister should be copied.
+class CopySubRegRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ /// The matcher for the instruction that this operand is copied from.
+ /// This provides the facility for looking up an operand by its name so
+ /// that it can be used as a source for the instruction being built.
+ const InstructionMatcher &Matched;
+ /// The name of the operand.
+ const StringRef SymbolicName;
+ /// The subregister to extract.
+ const CodeGenSubRegIndex *SubReg;
+
+public:
+ CopySubRegRenderer(unsigned NewInsnID, const InstructionMatcher &Matched,
+ StringRef SymbolicName, const CodeGenSubRegIndex *SubReg)
+ : OperandRenderer(OR_CopySubReg), NewInsnID(NewInsnID), Matched(Matched),
+ SymbolicName(SymbolicName), SubReg(SubReg) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_CopySubReg;
+ }
+
+ const StringRef getSymbolicName() const { return SymbolicName; }
+
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ const OperandMatcher &Operand = Matched.getOperand(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ OS << " GIR_CopySubReg, /*NewInsnID*/" << NewInsnID
+ << ", /*OldInsnID*/" << OldInsnVarID << ", /*OpIdx*/"
+ << Operand.getOperandIndex() << ", /*SubRegIdx*/" << SubReg->EnumValue
+ << ", // " << SymbolicName << "\n";
+ }
+};
+
+/// Adds a specific physical register to the instruction being built.
+/// This is typically useful for WZR/XZR on AArch64.
+class AddRegisterRenderer : public OperandRenderer {
+protected:
+ unsigned InsnID;
+ const Record *RegisterDef;
+
+public:
+ AddRegisterRenderer(unsigned InsnID, const Record *RegisterDef)
+ : OperandRenderer(OR_Register), InsnID(InsnID), RegisterDef(RegisterDef) {
+ }
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_Register;
+ }
+
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ OS << " GIR_AddRegister, /*InsnID*/" << InsnID << ", "
+ << (RegisterDef->getValue("Namespace")
+ ? RegisterDef->getValueAsString("Namespace")
+ : "")
+ << "::" << RegisterDef->getName() << ",\n";
+ }
+};
+
+/// Adds a specific immediate to the instruction being built.
+class ImmRenderer : public OperandRenderer {
+protected:
+ unsigned InsnID;
+ int64_t Imm;
+
+public:
+ ImmRenderer(unsigned InsnID, int64_t Imm)
+ : OperandRenderer(OR_Imm), InsnID(InsnID), Imm(Imm) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_Imm;
+ }
+
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ OS << " GIR_AddImm, /*InsnID*/" << InsnID << ", /*Imm*/" << Imm
+ << ",\n";
+ }
+};
+
+/// Adds operands by calling a renderer function supplied by the ComplexPattern
+/// matcher function.
+class RenderComplexPatternOperand : public OperandRenderer {
+private:
+ unsigned InsnID;
+ const Record &TheDef;
+ /// The name of the operand.
+ const StringRef SymbolicName;
+ /// The renderer number. This must be unique within a rule since it's used to
+ /// identify a temporary variable to hold the renderer function.
+ unsigned RendererID;
+
+ unsigned getNumOperands() const {
+ return TheDef.getValueAsDag("Operands")->getNumArgs();
+ }
+
+public:
+ RenderComplexPatternOperand(unsigned InsnID, const Record &TheDef,
+ StringRef SymbolicName, unsigned RendererID)
+ : OperandRenderer(OR_ComplexPattern), InsnID(InsnID), TheDef(TheDef),
+ SymbolicName(SymbolicName), RendererID(RendererID) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_ComplexPattern;
+ }
+
+ void emitRenderOpcodes(raw_ostream &OS, RuleMatcher &Rule) const override {
+ OS << " GIR_ComplexRenderer, /*InsnID*/" << InsnID << ", /*RendererID*/"
+ << RendererID << ",\n";
+ }
+};
+
+/// An action taken when all Matcher predicates succeeded for a parent rule.
+///
+/// Typical actions include:
+/// * Changing the opcode of an instruction.
+/// * Adding an operand to an instruction.
+class MatchAction {
+public:
+ virtual ~MatchAction() {}
+
+ /// Emit the C++ statements to implement the action.
+ ///
+ /// \param RecycleInsnID If given, it's an instruction to recycle. The
+ /// requirements on the instruction vary from action to
+ /// action.
+ virtual void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned RecycleInsnID) const = 0;
+};
+
+/// Generates a comment describing the matched rule being acted upon.
+class DebugCommentAction : public MatchAction {
+private:
const PatternToMatch &P;
public:
- std::vector<std::unique_ptr<Matcher>> Matchers;
- std::vector<std::unique_ptr<MatchAction>> Actions;
+ DebugCommentAction(const PatternToMatch &P) : P(P) {}
- MatcherEmitter(const PatternToMatch &P) : P(P) {}
+ void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned RecycleInsnID) const override {
+ OS << " // " << *P.getSrcPattern() << " => " << *P.getDstPattern()
+ << "\n";
+ }
+};
- void emit(raw_ostream &OS) {
- if (Matchers.empty())
- llvm_unreachable("Unexpected empty matcher!");
+/// Generates code to build an instruction or mutate an existing instruction
+/// into the desired instruction when this is possible.
+class BuildMIAction : public MatchAction {
+private:
+ unsigned InsnID;
+ const CodeGenInstruction *I;
+ const InstructionMatcher &Matched;
+ std::vector<std::unique_ptr<OperandRenderer>> OperandRenderers;
+
+ /// True if the instruction can be built solely by mutating the opcode.
+ bool canMutate() const {
+ if (OperandRenderers.size() != Matched.getNumOperands())
+ return false;
+
+ for (const auto &Renderer : enumerate(OperandRenderers)) {
+ if (const auto *Copy = dyn_cast<CopyRenderer>(&*Renderer.value())) {
+ const OperandMatcher &OM = Matched.getOperand(Copy->getSymbolicName());
+ if (&Matched != &OM.getInstructionMatcher() ||
+ OM.getOperandIndex() != Renderer.index())
+ return false;
+ } else
+ return false;
+ }
- OS << " // Src: " << *P.getSrcPattern() << "\n"
- << " // Dst: " << *P.getDstPattern() << "\n";
+ return true;
+ }
- OS << " if ((" << *Matchers.front() << ")";
- for (auto &MA : makeArrayRef(Matchers).drop_front())
- OS << " &&\n (" << *MA << ")";
- OS << ") {\n";
+public:
+ BuildMIAction(unsigned InsnID, const CodeGenInstruction *I,
+ const InstructionMatcher &Matched)
+ : InsnID(InsnID), I(I), Matched(Matched) {}
+
+ template <class Kind, class... Args>
+ Kind &addRenderer(Args&&... args) {
+ OperandRenderers.emplace_back(
+ llvm::make_unique<Kind>(std::forward<Args>(args)...));
+ return *static_cast<Kind *>(OperandRenderers.back().get());
+ }
- for (auto &MA : Actions)
- OS << " " << *MA << "\n";
+ void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned RecycleInsnID) const override {
+ if (canMutate()) {
+ OS << " GIR_MutateOpcode, /*InsnID*/" << InsnID
+ << ", /*RecycleInsnID*/ " << RecycleInsnID << ", /*Opcode*/"
+ << I->Namespace << "::" << I->TheDef->getName() << ",\n";
+
+ if (!I->ImplicitDefs.empty() || !I->ImplicitUses.empty()) {
+ for (auto Def : I->ImplicitDefs) {
+ auto Namespace = Def->getValue("Namespace")
+ ? Def->getValueAsString("Namespace")
+ : "";
+ OS << " GIR_AddImplicitDef, " << InsnID << ", " << Namespace
+ << "::" << Def->getName() << ",\n";
+ }
+ for (auto Use : I->ImplicitUses) {
+ auto Namespace = Use->getValue("Namespace")
+ ? Use->getValueAsString("Namespace")
+ : "";
+ OS << " GIR_AddImplicitUse, " << InsnID << ", " << Namespace
+ << "::" << Use->getName() << ",\n";
+ }
+ }
+ return;
+ }
- OS << " constrainSelectedInstRegOperands(I, TII, TRI, RBI);\n";
- OS << " return true;\n";
- OS << " }\n";
+ // TODO: Simple permutation looks like it could be almost as common as
+ // mutation due to commutative operations.
+
+ OS << " GIR_BuildMI, /*InsnID*/" << InsnID << ", /*Opcode*/"
+ << I->Namespace << "::" << I->TheDef->getName() << ",\n";
+ for (const auto &Renderer : OperandRenderers)
+ Renderer->emitRenderOpcodes(OS, Rule);
+
+ OS << " GIR_MergeMemOperands, /*InsnID*/" << InsnID << ",\n"
+ << " GIR_EraseFromParent, /*InsnID*/" << RecycleInsnID << ",\n";
+ }
+};
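
(Hedged sketch of the two shapes this action can take, for a hypothetical rule that rewrites MIs[0] into MyTarget::ADDWrr; the instruction name and operand layout are invented for illustration.)

    // canMutate() == true: every operand is copied, in order, from MIs[0].
    GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/ 0, /*Opcode*/MyTarget::ADDWrr,

    // Otherwise a fresh instruction is built and the matched root erased.
    GIR_BuildMI, /*InsnID*/0, /*Opcode*/MyTarget::ADDWrr,
    GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst
    GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/1, // src1
    GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/2, // src2
    GIR_MergeMemOperands, /*InsnID*/0,
    GIR_EraseFromParent, /*InsnID*/0,
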
+
+/// Generates code to constrain the operands of an output instruction to the
+/// register classes specified by the definition of that instruction.
+class ConstrainOperandsToDefinitionAction : public MatchAction {
+ unsigned InsnID;
+
+public:
+ ConstrainOperandsToDefinitionAction(unsigned InsnID) : InsnID(InsnID) {}
+
+ void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned RecycleInsnID) const override {
+ OS << " GIR_ConstrainSelectedInstOperands, /*InsnID*/" << InsnID << ",\n";
+ }
+};
+
+/// Generates code to constrain the specified operand of an output instruction
+/// to the specified register class.
+class ConstrainOperandToRegClassAction : public MatchAction {
+ unsigned InsnID;
+ unsigned OpIdx;
+ const CodeGenRegisterClass &RC;
+
+public:
+ ConstrainOperandToRegClassAction(unsigned InsnID, unsigned OpIdx,
+ const CodeGenRegisterClass &RC)
+ : InsnID(InsnID), OpIdx(OpIdx), RC(RC) {}
+
+ void emitCxxActionStmts(raw_ostream &OS, RuleMatcher &Rule,
+ unsigned RecycleInsnID) const override {
+ OS << " GIR_ConstrainOperandRC, /*InsnID*/" << InsnID << ", /*Op*/"
+ << OpIdx << ", /*RC " << RC.getName() << "*/ " << RC.EnumValue << ",\n";
}
};
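
(For reference, a sketch of the single table entry each of the two constraint actions above prints; the register class name and its enum value are placeholders.)

    GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/0, /*RC GPR32*/ 1,
    GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
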
+InstructionMatcher &RuleMatcher::addInstructionMatcher() {
+ Matchers.emplace_back(new InstructionMatcher());
+ return *Matchers.back();
+}
+
+void RuleMatcher::addRequiredFeature(Record *Feature) {
+ RequiredFeatures.push_back(Feature);
+}
+
+const std::vector<Record *> &RuleMatcher::getRequiredFeatures() const {
+ return RequiredFeatures;
+}
+
+template <class Kind, class... Args>
+Kind &RuleMatcher::addAction(Args &&... args) {
+ Actions.emplace_back(llvm::make_unique<Kind>(std::forward<Args>(args)...));
+ return *static_cast<Kind *>(Actions.back().get());
+}
+
+unsigned
+RuleMatcher::implicitlyDefineInsnVar(const InstructionMatcher &Matcher) {
+ unsigned NewInsnVarID = NextInsnVarID++;
+ InsnVariableIDs[&Matcher] = NewInsnVarID;
+ return NewInsnVarID;
+}
+
+unsigned RuleMatcher::defineInsnVar(raw_ostream &OS,
+ const InstructionMatcher &Matcher,
+ unsigned InsnID, unsigned OpIdx) {
+ unsigned NewInsnVarID = implicitlyDefineInsnVar(Matcher);
+ OS << " GIM_RecordInsn, /*DefineMI*/" << NewInsnVarID << ", /*MI*/"
+ << InsnID << ", /*OpIdx*/" << OpIdx << ", // MIs[" << NewInsnVarID
+ << "]\n";
+ return NewInsnVarID;
+}
+
+unsigned RuleMatcher::getInsnVarID(const InstructionMatcher &InsnMatcher) const {
+ const auto &I = InsnVariableIDs.find(&InsnMatcher);
+ if (I != InsnVariableIDs.end())
+ return I->second;
+ llvm_unreachable("Matched Insn was not captured in a local variable");
+}
+
+/// Emit MatchTable opcodes to check the shape of the match and capture
+/// instructions into local variables.
+void RuleMatcher::emitCaptureOpcodes(raw_ostream &OS) {
+ assert(Matchers.size() == 1 && "Cannot handle multi-root matchers yet");
+ unsigned InsnVarID = implicitlyDefineInsnVar(*Matchers.front());
+ Matchers.front()->emitCaptureOpcodes(OS, *this, InsnVarID);
+}
+
+void RuleMatcher::emit(raw_ostream &OS) {
+ if (Matchers.empty())
+ llvm_unreachable("Unexpected empty matcher!");
+
+ // The representation supports rules that require multiple roots such as:
+ // %ptr(p0) = ...
+ // %elt0(s32) = G_LOAD %ptr
+ // %1(p0) = G_ADD %ptr, 4
+ // %elt1(s32) = G_LOAD p0 %1
+ // which could be usefully folded into:
+ // %ptr(p0) = ...
+ // %elt0(s32), %elt1(s32) = TGT_LOAD_PAIR %ptr
+ // on some targets but we don't need to make use of that yet.
+ assert(Matchers.size() == 1 && "Cannot handle multi-root matchers yet");
+
+ OS << " const static int64_t MatchTable" << CurrentMatchTableID << "[] = {\n";
+ if (!RequiredFeatures.empty()) {
+ OS << " GIM_CheckFeatures, " << getNameForFeatureBitset(RequiredFeatures)
+ << ",\n";
+ }
+
+ emitCaptureOpcodes(OS);
+
+ Matchers.front()->emitPredicateOpcodes(OS, *this,
+ getInsnVarID(*Matchers.front()));
+
+ // We must also check if it's safe to fold the matched instructions.
+ if (InsnVariableIDs.size() >= 2) {
+ // Invert the map to create stable ordering (by var names)
+ SmallVector<unsigned, 2> InsnIDs;
+ for (const auto &Pair : InsnVariableIDs) {
+ // Skip the root node since it isn't moving anywhere. Everything else is
+ // sinking to meet it.
+ if (Pair.first == Matchers.front().get())
+ continue;
+
+ InsnIDs.push_back(Pair.second);
+ }
+ std::sort(InsnIDs.begin(), InsnIDs.end());
+
+ for (const auto &InsnID : InsnIDs) {
+ // Reject the difficult cases until we have a more accurate check.
+ OS << " GIM_CheckIsSafeToFold, /*InsnID*/" << InsnID << ",\n";
+
+ // FIXME: Emit checks to determine it's _actually_ safe to fold and/or
+ // account for unsafe cases.
+ //
+ // Example:
+ // MI1--> %0 = ...
+ // %1 = ... %0
+ // MI0--> %2 = ... %0
+ // It's not safe to erase MI1. We currently handle this by not
+ // erasing %0 (even when it's dead).
+ //
+ // Example:
+ // MI1--> %0 = load volatile @a
+ // %1 = load volatile @a
+ // MI0--> %2 = ... %0
+ // It's not safe to sink %0's def past %1. We currently handle
+ // this by rejecting all loads.
+ //
+ // Example:
+ // MI1--> %0 = load @a
+ // %1 = store @a
+ // MI0--> %2 = ... %0
+ // It's not safe to sink %0's def past %1. We currently handle
+ // this by rejecting all loads.
+ //
+ // Example:
+ // G_CONDBR %cond, @BB1
+ // BB0:
+ // MI1--> %0 = load @a
+ // G_BR @BB1
+ // BB1:
+ // MI0--> %2 = ... %0
+ // It's not always safe to sink %0 across control flow. In this
+ // case it may introduce a memory fault. We currently handle this
+ // by rejecting all loads.
+ }
+ }
+
+ for (const auto &MA : Actions)
+ MA->emitCxxActionStmts(OS, *this, 0);
+ OS << " GIR_Done,\n"
+ << " };\n"
+ << " State.MIs.resize(1);\n"
+ << " DEBUG(dbgs() << \"Processing MatchTable" << CurrentMatchTableID
+ << "\\n\");\n"
+ << " if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable"
+ << CurrentMatchTableID << ", TII, MRI, TRI, RBI, AvailableFeatures)) {\n"
+ << " return true;\n"
+ << " }\n\n";
+}
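
(Putting the pieces together: each rule becomes one static table plus a call into the table interpreter. The following is a condensed, hypothetical example of what emit() produces for a single-root rule with one required feature; all identifiers other than the GIM_/GIR_ opcodes are invented for illustration.)

    const static int64_t MatchTable0[] = {
      GIM_CheckFeatures, GIFBS_HasSomeFeature,
      GIM_CheckNumOperands, /*MI*/0, /*Expected*/3,
      GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_ADD,
      // ... operand predicates (types, register banks, nested insns) ...
      GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/ 0, /*Opcode*/MyTarget::ADDWrr,
      GIR_ConstrainSelectedInstOperands, /*InsnID*/0,
      GIR_Done,
    };
    State.MIs.resize(1);
    DEBUG(dbgs() << "Processing MatchTable0\n");
    if (executeMatchTable(*this, OutMIs, State, MatcherInfo, MatchTable0, TII,
                          MRI, TRI, RBI, AvailableFeatures)) {
      return true;
    }
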
+
+bool RuleMatcher::isHigherPriorityThan(const RuleMatcher &B) const {
+ // Rules involving more match roots have higher priority.
+ if (Matchers.size() > B.Matchers.size())
+ return true;
+ if (Matchers.size() < B.Matchers.size())
+ return false;
+
+ for (const auto &Matcher : zip(Matchers, B.Matchers)) {
+ if (std::get<0>(Matcher)->isHigherPriorityThan(*std::get<1>(Matcher)))
+ return true;
+ if (std::get<1>(Matcher)->isHigherPriorityThan(*std::get<0>(Matcher)))
+ return false;
+ }
+
+ return false;
+}
+
+unsigned RuleMatcher::countRendererFns() const {
+ return std::accumulate(
+ Matchers.begin(), Matchers.end(), 0,
+ [](unsigned A, const std::unique_ptr<InstructionMatcher> &Matcher) {
+ return A + Matcher->countRendererFns();
+ });
+}
+
//===- GlobalISelEmitter class --------------------------------------------===//
+class GlobalISelEmitter {
+public:
+ explicit GlobalISelEmitter(RecordKeeper &RK);
+ void run(raw_ostream &OS);
+
+private:
+ const RecordKeeper &RK;
+ const CodeGenDAGPatterns CGP;
+ const CodeGenTarget &Target;
+ CodeGenRegBank CGRegs;
+
+ /// Keep track of the equivalence between SDNodes and Instructions.
+ /// This is defined using 'GINodeEquiv' in the target description.
+ DenseMap<Record *, const CodeGenInstruction *> NodeEquivs;
+
+ /// Keep track of the equivalence between ComplexPatterns and
+ /// GIComplexOperandMatcher. Map entries are specified by subclassing
+ /// GIComplexPatternEquiv.
+ DenseMap<const Record *, const Record *> ComplexPatternEquivs;
+
+ // Map of predicates to their subtarget features.
+ SubtargetFeatureInfoMap SubtargetFeatures;
+
+ void gatherNodeEquivs();
+ const CodeGenInstruction *findNodeEquiv(Record *N) const;
+
+ Error importRulePredicates(RuleMatcher &M, ArrayRef<Init *> Predicates);
+ Expected<InstructionMatcher &>
+ createAndImportSelDAGMatcher(InstructionMatcher &InsnMatcher,
+ const TreePatternNode *Src,
+ unsigned &TempOpIdx) const;
+ Error importChildMatcher(InstructionMatcher &InsnMatcher,
+ const TreePatternNode *SrcChild, unsigned OpIdx,
+ unsigned &TempOpIdx) const;
+ Expected<BuildMIAction &>
+ createAndImportInstructionRenderer(RuleMatcher &M, const TreePatternNode *Dst,
+ const InstructionMatcher &InsnMatcher);
+ Error importExplicitUseRenderer(BuildMIAction &DstMIBuilder,
+ TreePatternNode *DstChild,
+ const InstructionMatcher &InsnMatcher) const;
+ Error importDefaultOperandRenderers(BuildMIAction &DstMIBuilder,
+ DagInit *DefaultOps) const;
+ Error
+ importImplicitDefRenderers(BuildMIAction &DstMIBuilder,
+ const std::vector<Record *> &ImplicitDefs) const;
+
+ /// Analyze pattern \p P, returning a matcher for it if possible.
+ /// Otherwise, return an Error explaining why we don't support it.
+ Expected<RuleMatcher> runOnPattern(const PatternToMatch &P);
+
+ void declareSubtargetFeature(Record *Predicate);
+};
+
void GlobalISelEmitter::gatherNodeEquivs() {
assert(NodeEquivs.empty());
for (Record *Equiv : RK.getAllDerivedDefinitions("GINodeEquiv"))
NodeEquivs[Equiv->getValueAsDef("Node")] =
&Target.getInstruction(Equiv->getValueAsDef("I"));
+
+ assert(ComplexPatternEquivs.empty());
+ for (Record *Equiv : RK.getAllDerivedDefinitions("GIComplexPatternEquiv")) {
+ Record *SelDAGEquiv = Equiv->getValueAsDef("SelDAGEquivalent");
+ if (!SelDAGEquiv)
+ continue;
+ ComplexPatternEquivs[SelDAGEquiv] = Equiv;
+ }
}
-const CodeGenInstruction *GlobalISelEmitter::findNodeEquiv(Record *N) {
+const CodeGenInstruction *GlobalISelEmitter::findNodeEquiv(Record *N) const {
return NodeEquivs.lookup(N);
}
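
(The equivalences gathered above come from the target description: GINodeEquiv records pair an SDNode with a generic instruction, and GIComplexPatternEquiv records point a GIComplexOperandMatcher back at the SelectionDAG ComplexPattern it replaces. A minimal sketch of how a looked-up equivalence is then used, mirroring createAndImportSelDAGMatcher(); the 'add' → G_ADD pairing is assumed for illustration and need not exist for every target.)

    // Resolve the SDNode record for 'add' to its gMIR instruction and turn it
    // into an opcode predicate on the instruction being matched.
    if (const CodeGenInstruction *Equiv = findNodeEquiv(RK.getDef("add")))
      InsnMatcher.addPredicate<InstructionOpcodeMatcher>(Equiv);
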
GlobalISelEmitter::GlobalISelEmitter(RecordKeeper &RK)
- : RK(RK), CGP(RK), Target(CGP.getTargetInfo()) {}
+ : RK(RK), CGP(RK), Target(CGP.getTargetInfo()), CGRegs(RK) {}
//===- Emitter ------------------------------------------------------------===//
-Optional<GlobalISelEmitter::SkipReason>
-GlobalISelEmitter::runOnPattern(const PatternToMatch &P, raw_ostream &OS) {
+Error
+GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
+ ArrayRef<Init *> Predicates) {
+ for (const Init *Predicate : Predicates) {
+ const DefInit *PredicateDef = static_cast<const DefInit *>(Predicate);
+ declareSubtargetFeature(PredicateDef->getDef());
+ M.addRequiredFeature(PredicateDef->getDef());
+ }
- // Keep track of the matchers and actions to emit.
- MatcherEmitter M(P);
+ return Error::success();
+}
- // First, analyze the whole pattern.
- // If the entire pattern has a predicate (e.g., target features), ignore it.
- if (!P.getPredicates()->getValues().empty())
- return SkipReason{"Pattern has a predicate"};
+Expected<InstructionMatcher &>
+GlobalISelEmitter::createAndImportSelDAGMatcher(InstructionMatcher &InsnMatcher,
+ const TreePatternNode *Src,
+ unsigned &TempOpIdx) const {
+ const CodeGenInstruction *SrcGIOrNull = nullptr;
- // Physreg imp-defs require additional logic. Ignore the pattern.
- if (!P.getDstRegs().empty())
- return SkipReason{"Pattern defines a physical register"};
+ // Start with the defined operands (i.e., the results of the root operator).
+ if (Src->getExtTypes().size() > 1)
+ return failedImport("Src pattern has multiple results");
+
+ if (Src->isLeaf()) {
+ Init *SrcInit = Src->getLeafValue();
+ if (isa<IntInit>(SrcInit)) {
+ InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
+ &Target.getInstruction(RK.getDef("G_CONSTANT")));
+ } else
+ return failedImport(
+ "Unable to deduce gMIR opcode to handle Src (which is a leaf)");
+ } else {
+ SrcGIOrNull = findNodeEquiv(Src->getOperator());
+ if (!SrcGIOrNull)
+ return failedImport("Pattern operator lacks an equivalent Instruction" +
+ explainOperator(Src->getOperator()));
+ auto &SrcGI = *SrcGIOrNull;
+
+ // The operators look good: match the opcode
+ InsnMatcher.addPredicate<InstructionOpcodeMatcher>(&SrcGI);
+ }
- // Next, analyze the pattern operators.
- TreePatternNode *Src = P.getSrcPattern();
- TreePatternNode *Dst = P.getDstPattern();
+ unsigned OpIdx = 0;
+ for (const EEVT::TypeSet &Ty : Src->getExtTypes()) {
+ auto OpTyOrNone = MVTToLLT(Ty.getConcrete());
- // If the root of either pattern isn't a simple operator, ignore it.
- if (!isTrivialOperatorNode(Dst))
- return SkipReason{"Dst pattern root isn't a trivial operator"};
- if (!isTrivialOperatorNode(Src))
- return SkipReason{"Src pattern root isn't a trivial operator"};
+ if (!OpTyOrNone)
+ return failedImport(
+ "Result of Src pattern operator has an unsupported type");
- Record *DstOp = Dst->getOperator();
- if (!DstOp->isSubClassOf("Instruction"))
- return SkipReason{"Pattern operator isn't an instruction"};
+ // Results don't have a name unless they are the root node. The caller will
+ // set the name if appropriate.
+ OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
+ OM.addPredicate<LLTOperandMatcher>(*OpTyOrNone);
+ }
- auto &DstI = Target.getInstruction(DstOp);
+ if (Src->isLeaf()) {
+ Init *SrcInit = Src->getLeafValue();
+ if (IntInit *SrcIntInit = dyn_cast<IntInit>(SrcInit)) {
+ OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
+ OM.addPredicate<LiteralIntOperandMatcher>(SrcIntInit->getValue());
+ } else
+ return failedImport(
+ "Unable to deduce gMIR opcode to handle Src (which is a leaf)");
+ } else {
+ assert(SrcGIOrNull &&
+ "Expected to have already found an equivalent Instruction");
+ // Match the used operands (i.e. the children of the operator).
+ for (unsigned i = 0, e = Src->getNumChildren(); i != e; ++i) {
+ TreePatternNode *SrcChild = Src->getChild(i);
+
+ // For G_INTRINSIC, the operand immediately following the defs is an
+ // intrinsic ID.
+ if (SrcGIOrNull->TheDef->getName() == "G_INTRINSIC" && i == 0) {
+ if (const CodeGenIntrinsic *II = Src->getIntrinsicInfo(CGP)) {
+ OperandMatcher &OM =
+ InsnMatcher.addOperand(OpIdx++, SrcChild->getName(), TempOpIdx);
+ OM.addPredicate<IntrinsicIDOperandMatcher>(II);
+ continue;
+ }
- auto SrcGIOrNull = findNodeEquiv(Src->getOperator());
- if (!SrcGIOrNull)
- return SkipReason{"Pattern operator lacks an equivalent Instruction"};
- auto &SrcGI = *SrcGIOrNull;
+ return failedImport("Expected IntInit containing instrinsic ID)");
+ }
- // The operators look good: match the opcode and mutate it to the new one.
- M.Matchers.emplace_back(new MatchOpcode(&SrcGI));
- M.Actions.emplace_back(new MutateOpcode(&DstI));
+ if (auto Error =
+ importChildMatcher(InsnMatcher, SrcChild, OpIdx++, TempOpIdx))
+ return std::move(Error);
+ }
+ }
- // Next, analyze the children, only accepting patterns that don't require
- // any change to operands.
- if (Src->getNumChildren() != Dst->getNumChildren())
- return SkipReason{"Src/dst patterns have a different # of children"};
+ return InsnMatcher;
+}
- unsigned OpIdx = 0;
+Error GlobalISelEmitter::importChildMatcher(InstructionMatcher &InsnMatcher,
+ const TreePatternNode *SrcChild,
+ unsigned OpIdx,
+ unsigned &TempOpIdx) const {
+ OperandMatcher &OM =
+ InsnMatcher.addOperand(OpIdx, SrcChild->getName(), TempOpIdx);
+
+ if (SrcChild->hasAnyPredicate())
+ return failedImport("Src pattern child has predicate (" +
+ explainPredicates(SrcChild) + ")");
+
+ ArrayRef<EEVT::TypeSet> ChildTypes = SrcChild->getExtTypes();
+ if (ChildTypes.size() != 1)
+ return failedImport("Src pattern child has multiple results");
+
+ // Check MBBs before the type check since they are not a known type.
+ if (!SrcChild->isLeaf()) {
+ if (SrcChild->getOperator()->isSubClassOf("SDNode")) {
+ auto &ChildSDNI = CGP.getSDNodeInfo(SrcChild->getOperator());
+ if (ChildSDNI.getSDClassName() == "BasicBlockSDNode") {
+ OM.addPredicate<MBBOperandMatcher>();
+ return Error::success();
+ }
+ }
+ }
- // Start with the defined operands (i.e., the results of the root operator).
- if (DstI.Operands.NumDefs != Src->getExtTypes().size())
- return SkipReason{"Src pattern results and dst MI defs are different"};
+ auto OpTyOrNone = MVTToLLT(ChildTypes.front().getConcrete());
+ if (!OpTyOrNone)
+ return failedImport("Src operand has an unsupported type (" + to_string(*SrcChild) + ")");
+ OM.addPredicate<LLTOperandMatcher>(*OpTyOrNone);
+
+ // Check for nested instructions.
+ if (!SrcChild->isLeaf()) {
+ // Map the node to a gMIR instruction.
+ InstructionOperandMatcher &InsnOperand =
+ OM.addPredicate<InstructionOperandMatcher>();
+ auto InsnMatcherOrError = createAndImportSelDAGMatcher(
+ InsnOperand.getInsnMatcher(), SrcChild, TempOpIdx);
+ if (auto Error = InsnMatcherOrError.takeError())
+ return Error;
+
+ return Error::success();
+ }
- for (const EEVT::TypeSet &Ty : Src->getExtTypes()) {
- Record *DstIOpRec = DstI.Operands[OpIdx].Rec;
- if (!DstIOpRec->isSubClassOf("RegisterClass"))
- return SkipReason{"Dst MI def isn't a register class"};
+ // Check for constant immediates.
+ if (auto *ChildInt = dyn_cast<IntInit>(SrcChild->getLeafValue())) {
+ OM.addPredicate<ConstantIntOperandMatcher>(ChildInt->getValue());
+ return Error::success();
+ }
- auto OpTyOrNone = MVTToLLT(Ty.getConcrete());
- if (!OpTyOrNone)
- return SkipReason{"Dst operand has an unsupported type"};
+ // Check for defs like register classes or ComplexPatterns.
+ if (auto *ChildDefInit = dyn_cast<DefInit>(SrcChild->getLeafValue())) {
+ auto *ChildRec = ChildDefInit->getDef();
- M.Matchers.emplace_back(new MatchRegOpType(OpIdx, *OpTyOrNone));
- M.Matchers.emplace_back(
- new MatchRegOpBank(OpIdx, Target.getRegisterClass(DstIOpRec)));
- ++OpIdx;
- }
+ // Check for register classes.
+ if (ChildRec->isSubClassOf("RegisterClass") ||
+ ChildRec->isSubClassOf("RegisterOperand")) {
+ OM.addPredicate<RegisterBankOperandMatcher>(
+ Target.getRegisterClass(getInitValueAsRegClass(ChildDefInit)));
+ return Error::success();
+ }
- // Finally match the used operands (i.e., the children of the root operator).
- for (unsigned i = 0, e = Src->getNumChildren(); i != e; ++i) {
- auto *SrcChild = Src->getChild(i);
- auto *DstChild = Dst->getChild(i);
+ // Check for ComplexPatterns.
+ if (ChildRec->isSubClassOf("ComplexPattern")) {
+ const auto &ComplexPattern = ComplexPatternEquivs.find(ChildRec);
+ if (ComplexPattern == ComplexPatternEquivs.end())
+ return failedImport("SelectionDAG ComplexPattern (" +
+ ChildRec->getName() + ") not mapped to GlobalISel");
+
+ OM.addPredicate<ComplexPatternOperandMatcher>(OM,
+ *ComplexPattern->second);
+ TempOpIdx++;
+ return Error::success();
+ }
- // Patterns can reorder operands. Ignore those for now.
- if (SrcChild->getName() != DstChild->getName())
- return SkipReason{"Src/dst pattern children not in same order"};
+ if (ChildRec->isSubClassOf("ImmLeaf")) {
+ return failedImport(
+ "Src pattern child def is an unsupported tablegen class (ImmLeaf)");
+ }
- // The only non-leaf child we accept is 'bb': it's an operator because
- // BasicBlockSDNode isn't inline, but in MI it's just another operand.
- if (!SrcChild->isLeaf()) {
- if (DstChild->isLeaf() ||
- SrcChild->getOperator() != DstChild->getOperator())
- return SkipReason{"Src/dst pattern child operator mismatch"};
+ return failedImport(
+ "Src pattern child def is an unsupported tablegen class");
+ }
- if (SrcChild->getOperator()->isSubClassOf("SDNode")) {
- auto &ChildSDNI = CGP.getSDNodeInfo(SrcChild->getOperator());
- if (ChildSDNI.getSDClassName() == "BasicBlockSDNode") {
- M.Matchers.emplace_back(new MatchMBBOp(OpIdx++));
- continue;
- }
+ return failedImport("Src pattern child is an unsupported kind");
+}
+
+Error GlobalISelEmitter::importExplicitUseRenderer(
+ BuildMIAction &DstMIBuilder, TreePatternNode *DstChild,
+ const InstructionMatcher &InsnMatcher) const {
+ // The only non-leaf child we accept is 'bb': it's an operator because
+ // BasicBlockSDNode isn't inline, but in MI it's just another operand.
+ if (!DstChild->isLeaf()) {
+ if (DstChild->getOperator()->isSubClassOf("SDNode")) {
+ auto &ChildSDNI = CGP.getSDNodeInfo(DstChild->getOperator());
+ if (ChildSDNI.getSDClassName() == "BasicBlockSDNode") {
+ DstMIBuilder.addRenderer<CopyRenderer>(0, InsnMatcher,
+ DstChild->getName());
+ return Error::success();
}
- return SkipReason{"Src pattern child isn't a leaf node"};
}
+ return failedImport("Dst pattern child isn't a leaf node or an MBB");
+ }
- if (SrcChild->getLeafValue() != DstChild->getLeafValue())
- return SkipReason{"Src/dst pattern child leaf mismatch"};
+ // Otherwise, we're looking for a bog-standard RegisterClass operand.
+ if (DstChild->hasAnyPredicate())
+ return failedImport("Dst pattern child has predicate (" +
+ explainPredicates(DstChild) + ")");
- // Otherwise, we're looking for a bog-standard RegisterClass operand.
- if (SrcChild->hasAnyPredicate())
- return SkipReason{"Src pattern child has predicate"};
- auto *ChildRec = cast<DefInit>(SrcChild->getLeafValue())->getDef();
- if (!ChildRec->isSubClassOf("RegisterClass"))
- return SkipReason{"Src pattern child isn't a RegisterClass"};
+ if (auto *ChildDefInit = dyn_cast<DefInit>(DstChild->getLeafValue())) {
+ auto *ChildRec = ChildDefInit->getDef();
- ArrayRef<EEVT::TypeSet> ChildTypes = SrcChild->getExtTypes();
+ ArrayRef<EEVT::TypeSet> ChildTypes = DstChild->getExtTypes();
if (ChildTypes.size() != 1)
- return SkipReason{"Src pattern child has multiple results"};
+ return failedImport("Dst pattern child has multiple results");
auto OpTyOrNone = MVTToLLT(ChildTypes.front().getConcrete());
if (!OpTyOrNone)
- return SkipReason{"Src operand has an unsupported type"};
+ return failedImport("Dst operand has an unsupported type");
+
+ if (ChildRec->isSubClassOf("Register")) {
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(0, ChildRec);
+ return Error::success();
+ }
+
+ if (ChildRec->isSubClassOf("RegisterClass") ||
+ ChildRec->isSubClassOf("RegisterOperand")) {
+ DstMIBuilder.addRenderer<CopyRenderer>(0, InsnMatcher,
+ DstChild->getName());
+ return Error::success();
+ }
+
+ if (ChildRec->isSubClassOf("ComplexPattern")) {
+ const auto &ComplexPattern = ComplexPatternEquivs.find(ChildRec);
+ if (ComplexPattern == ComplexPatternEquivs.end())
+ return failedImport(
+ "SelectionDAG ComplexPattern not mapped to GlobalISel");
+
+ const OperandMatcher &OM = InsnMatcher.getOperand(DstChild->getName());
+ DstMIBuilder.addRenderer<RenderComplexPatternOperand>(
+ 0, *ComplexPattern->second, DstChild->getName(),
+ OM.getAllocatedTemporariesBaseID());
+ return Error::success();
+ }
+
+ if (ChildRec->isSubClassOf("SDNodeXForm"))
+ return failedImport("Dst pattern child def is an unsupported tablegen "
+ "class (SDNodeXForm)");
+
+ return failedImport(
+ "Dst pattern child def is an unsupported tablegen class");
+ }
+
+ return failedImport("Dst pattern child is an unsupported kind");
+}
+
+Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
+ RuleMatcher &M, const TreePatternNode *Dst,
+ const InstructionMatcher &InsnMatcher) {
+ Record *DstOp = Dst->getOperator();
+ if (!DstOp->isSubClassOf("Instruction")) {
+ if (DstOp->isSubClassOf("ValueType"))
+ return failedImport(
+ "Pattern operator isn't an instruction (it's a ValueType)");
+ return failedImport("Pattern operator isn't an instruction");
+ }
+ CodeGenInstruction *DstI = &Target.getInstruction(DstOp);
+
+ unsigned DstINumUses = DstI->Operands.size() - DstI->Operands.NumDefs;
+ unsigned ExpectedDstINumUses = Dst->getNumChildren();
+ bool IsExtractSubReg = false;
+
+ // COPY_TO_REGCLASS is just a copy with a ConstrainOperandToRegClassAction
+ // attached. Similarly for EXTRACT_SUBREG except that's a subregister copy.
+ if (DstI->TheDef->getName() == "COPY_TO_REGCLASS") {
+ DstI = &Target.getInstruction(RK.getDef("COPY"));
+ DstINumUses--; // Ignore the class constraint.
+ ExpectedDstINumUses--;
+ } else if (DstI->TheDef->getName() == "EXTRACT_SUBREG") {
+ DstI = &Target.getInstruction(RK.getDef("COPY"));
+ IsExtractSubReg = true;
+ }
+
+ auto &DstMIBuilder = M.addAction<BuildMIAction>(0, DstI, InsnMatcher);
+
+ // Render the explicit defs.
+ for (unsigned I = 0; I < DstI->Operands.NumDefs; ++I) {
+ const CGIOperandList::OperandInfo &DstIOperand = DstI->Operands[I];
+ DstMIBuilder.addRenderer<CopyRenderer>(0, InsnMatcher, DstIOperand.Name);
+ }
+
+ // EXTRACT_SUBREG needs to use a subregister COPY.
+ if (IsExtractSubReg) {
+ if (!Dst->getChild(0)->isLeaf())
+ return failedImport("EXTRACT_SUBREG child #1 is not a leaf");
+
+ if (DefInit *SubRegInit =
+ dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue())) {
+ CodeGenRegisterClass *RC = CGRegs.getRegClass(
+ getInitValueAsRegClass(Dst->getChild(0)->getLeafValue()));
+ CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
+
+ const auto &SrcRCDstRCPair =
+ RC->getMatchingSubClassWithSubRegs(CGRegs, SubIdx);
+ if (SrcRCDstRCPair.hasValue()) {
+ assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
+ if (SrcRCDstRCPair->first != RC)
+ return failedImport("EXTRACT_SUBREG requires an additional COPY");
+ }
+
+ DstMIBuilder.addRenderer<CopySubRegRenderer>(
+ 0, InsnMatcher, Dst->getChild(0)->getName(), SubIdx);
+ return DstMIBuilder;
+ }
+
+ return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
+ }
+
+ // Render the explicit uses.
+ unsigned Child = 0;
+ unsigned NumDefaultOps = 0;
+ for (unsigned I = 0; I != DstINumUses; ++I) {
+ const CGIOperandList::OperandInfo &DstIOperand =
+ DstI->Operands[DstI->Operands.NumDefs + I];
+
+ // If the operand has default values, introduce them now.
+ // FIXME: Until we have a decent test case that dictates we should do
+ // otherwise, we're going to assume that operands with default values cannot
+ // be specified in the patterns. Therefore, adding them will not cause us to
+ // end up with too many rendered operands.
+ if (DstIOperand.Rec->isSubClassOf("OperandWithDefaultOps")) {
+ DagInit *DefaultOps = DstIOperand.Rec->getValueAsDag("DefaultOps");
+ if (auto Error = importDefaultOperandRenderers(DstMIBuilder, DefaultOps))
+ return std::move(Error);
+ ++NumDefaultOps;
+ continue;
+ }
- M.Matchers.emplace_back(new MatchRegOpType(OpIdx, *OpTyOrNone));
- M.Matchers.emplace_back(
- new MatchRegOpBank(OpIdx, Target.getRegisterClass(ChildRec)));
+ if (auto Error = importExplicitUseRenderer(
+ DstMIBuilder, Dst->getChild(Child), InsnMatcher))
+ return std::move(Error);
+ ++Child;
+ }
+
+ if (NumDefaultOps + ExpectedDstINumUses != DstINumUses)
+ return failedImport("Expected " + llvm::to_string(DstINumUses) +
+ " used operands but found " +
+ llvm::to_string(ExpectedDstINumUses) +
+ " explicit ones and " + llvm::to_string(NumDefaultOps) +
+ " default ones");
+
+ return DstMIBuilder;
+}
+
+Error GlobalISelEmitter::importDefaultOperandRenderers(
+ BuildMIAction &DstMIBuilder, DagInit *DefaultOps) const {
+ for (const auto *DefaultOp : DefaultOps->getArgs()) {
+ // Look through ValueType operators.
+ if (const DagInit *DefaultDagOp = dyn_cast<DagInit>(DefaultOp)) {
+ if (const DefInit *DefaultDagOperator =
+ dyn_cast<DefInit>(DefaultDagOp->getOperator())) {
+ if (DefaultDagOperator->getDef()->isSubClassOf("ValueType"))
+ DefaultOp = DefaultDagOp->getArg(0);
+ }
+ }
+
+ if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(0, DefaultDefOp->getDef());
+ continue;
+ }
+
+ if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
+ DstMIBuilder.addRenderer<ImmRenderer>(0, DefaultIntOp->getValue());
+ continue;
+ }
+
+ return failedImport("Could not add default op");
+ }
+
+ return Error::success();
+}
+
+Error GlobalISelEmitter::importImplicitDefRenderers(
+ BuildMIAction &DstMIBuilder,
+ const std::vector<Record *> &ImplicitDefs) const {
+ if (!ImplicitDefs.empty())
+ return failedImport("Pattern defines a physical register");
+ return Error::success();
+}
+
+Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
+ // Keep track of the matchers and actions to emit.
+ RuleMatcher M;
+ M.addAction<DebugCommentAction>(P);
+
+ if (auto Error = importRulePredicates(M, P.getPredicates()->getValues()))
+ return std::move(Error);
+
+ // Next, analyze the pattern operators.
+ TreePatternNode *Src = P.getSrcPattern();
+ TreePatternNode *Dst = P.getDstPattern();
+
+ // If the root of either pattern isn't a simple operator, ignore it.
+ if (auto Err = isTrivialOperatorNode(Dst))
+ return failedImport("Dst pattern root isn't a trivial operator (" +
+ toString(std::move(Err)) + ")");
+ if (auto Err = isTrivialOperatorNode(Src))
+ return failedImport("Src pattern root isn't a trivial operator (" +
+ toString(std::move(Err)) + ")");
+
+ if (Dst->isLeaf())
+ return failedImport("Dst pattern root isn't a known leaf");
+
+ // Start with the defined operands (i.e., the results of the root operator).
+ Record *DstOp = Dst->getOperator();
+ if (!DstOp->isSubClassOf("Instruction"))
+ return failedImport("Pattern operator isn't an instruction");
+
+ auto &DstI = Target.getInstruction(DstOp);
+ if (DstI.Operands.NumDefs != Src->getExtTypes().size())
+ return failedImport("Src pattern results and dst MI defs are different (" +
+ to_string(Src->getExtTypes().size()) + " def(s) vs " +
+ to_string(DstI.Operands.NumDefs) + " def(s))");
+
+ InstructionMatcher &InsnMatcherTemp = M.addInstructionMatcher();
+ unsigned TempOpIdx = 0;
+ auto InsnMatcherOrError =
+ createAndImportSelDAGMatcher(InsnMatcherTemp, Src, TempOpIdx);
+ if (auto Error = InsnMatcherOrError.takeError())
+ return std::move(Error);
+ InstructionMatcher &InsnMatcher = InsnMatcherOrError.get();
+
+ // The root of the match also has constraints on the register bank so that it
+ // matches the result instruction.
+ unsigned OpIdx = 0;
+ for (const EEVT::TypeSet &Ty : Src->getExtTypes()) {
+ (void)Ty;
+
+ const auto &DstIOperand = DstI.Operands[OpIdx];
+ Record *DstIOpRec = DstIOperand.Rec;
+ if (DstI.TheDef->getName() == "COPY_TO_REGCLASS") {
+ DstIOpRec = getInitValueAsRegClass(Dst->getChild(1)->getLeafValue());
+
+ if (DstIOpRec == nullptr)
+ return failedImport(
+ "COPY_TO_REGCLASS operand #1 isn't a register class");
+ } else if (DstI.TheDef->getName() == "EXTRACT_SUBREG") {
+ if (!Dst->getChild(0)->isLeaf())
+ return failedImport("EXTRACT_SUBREG operand #0 isn't a leaf");
+
+ // We can assume that a subregister is in the same bank as its super
+ // register.
+ DstIOpRec = getInitValueAsRegClass(Dst->getChild(0)->getLeafValue());
+
+ if (DstIOpRec == nullptr)
+ return failedImport(
+ "EXTRACT_SUBREG operand #0 isn't a register class");
+ } else if (DstIOpRec->isSubClassOf("RegisterOperand"))
+ DstIOpRec = DstIOpRec->getValueAsDef("RegClass");
+ else if (!DstIOpRec->isSubClassOf("RegisterClass"))
+ return failedImport("Dst MI def isn't a register class" +
+ to_string(*Dst));
+
+ OperandMatcher &OM = InsnMatcher.getOperand(OpIdx);
+ OM.setSymbolicName(DstIOperand.Name);
+ OM.addPredicate<RegisterBankOperandMatcher>(
+ Target.getRegisterClass(DstIOpRec));
++OpIdx;
}
- // We're done with this pattern! Emit the processed result.
- M.emit(OS);
- ++NumPatternEmitted;
- return None;
+ auto DstMIBuilderOrError =
+ createAndImportInstructionRenderer(M, Dst, InsnMatcher);
+ if (auto Error = DstMIBuilderOrError.takeError())
+ return std::move(Error);
+ BuildMIAction &DstMIBuilder = DstMIBuilderOrError.get();
+
+ // Render the implicit defs.
+ // These are only added to the root of the result.
+ if (auto Error = importImplicitDefRenderers(DstMIBuilder, P.getDstRegs()))
+ return std::move(Error);
+
+ // Constrain the registers to classes. This is normally derived from the
+ // emitted instruction but a few instructions require special handling.
+ if (DstI.TheDef->getName() == "COPY_TO_REGCLASS") {
+ // COPY_TO_REGCLASS does not provide operand constraints itself but the
+ // result is constrained to the class given by the second child.
+ Record *DstIOpRec =
+ getInitValueAsRegClass(Dst->getChild(1)->getLeafValue());
+
+ if (DstIOpRec == nullptr)
+ return failedImport("COPY_TO_REGCLASS operand #1 isn't a register class");
+
+ M.addAction<ConstrainOperandToRegClassAction>(
+ 0, 0, Target.getRegisterClass(DstIOpRec));
+
+ // We're done with this pattern! It's eligible for GISel emission; return
+ // it.
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ if (DstI.TheDef->getName() == "EXTRACT_SUBREG") {
+ // EXTRACT_SUBREG selects into a subregister COPY but unlike most
+ // instructions, the result register class is controlled by the
+ // subregisters of the operand. As a result, we must constrain the result
+ // class rather than check that it's already the right one.
+ if (!Dst->getChild(0)->isLeaf())
+ return failedImport("EXTRACT_SUBREG child #1 is not a leaf");
+
+ DefInit *SubRegInit = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
+ if (!SubRegInit)
+ return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
+
+ // Constrain the result to the same register bank as the operand.
+ Record *DstIOpRec =
+ getInitValueAsRegClass(Dst->getChild(0)->getLeafValue());
+
+ if (DstIOpRec == nullptr)
+ return failedImport("EXTRACT_SUBREG operand #1 isn't a register class");
+
+ CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
+ CodeGenRegisterClass *SrcRC = CGRegs.getRegClass(DstIOpRec);
+
+ // It would be nice to leave this constraint implicit but we're required
+ // to pick a register class, so constrain the result to a register class
+ // that can hold the correct MVT.
+ //
+ // FIXME: This may introduce an extra copy if the chosen class doesn't
+ // actually contain the subregisters.
+ assert(Src->getExtTypes().size() == 1 &&
+ "Expected Src of EXTRACT_SUBREG to have one result type");
+
+ const auto &SrcRCDstRCPair =
+ SrcRC->getMatchingSubClassWithSubRegs(CGRegs, SubIdx);
+ assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, *SrcRCDstRCPair->second);
+ M.addAction<ConstrainOperandToRegClassAction>(0, 1, *SrcRCDstRCPair->first);
+
+ // We're done with this pattern! It's eligible for GISel emission; return
+ // it.
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ M.addAction<ConstrainOperandsToDefinitionAction>(0);
+
+ // We're done with this pattern! It's eligible for GISel emission; return it.
+ ++NumPatternImported;
+ return std::move(M);
}
void GlobalISelEmitter::run(raw_ostream &OS) {
@@ -359,26 +2012,241 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
emitSourceFileHeader(("Global Instruction Selector for the " +
Target.getName() + " target").str(), OS);
- OS << "bool " << Target.getName()
- << "InstructionSelector::selectImpl"
- "(MachineInstr &I) const {\n const MachineRegisterInfo &MRI = "
- "I.getParent()->getParent()->getRegInfo();\n";
-
+ std::vector<RuleMatcher> Rules;
// Look through the SelectionDAG patterns we found, possibly emitting some.
for (const PatternToMatch &Pat : CGP.ptms()) {
++NumPatternTotal;
- if (auto SkipReason = runOnPattern(Pat, OS)) {
+ auto MatcherOrErr = runOnPattern(Pat);
+
+ // The pattern analysis can fail, indicating an unsupported pattern.
+ // Report that if we've been asked to do so.
+ if (auto Err = MatcherOrErr.takeError()) {
if (WarnOnSkippedPatterns) {
PrintWarning(Pat.getSrcRecord()->getLoc(),
- "Skipped pattern: " + SkipReason->Reason);
+ "Skipped pattern: " + toString(std::move(Err)));
+ } else {
+ consumeError(std::move(Err));
}
- ++NumPatternSkipped;
+ ++NumPatternImportsSkipped;
+ continue;
}
+
+ Rules.push_back(std::move(MatcherOrErr.get()));
}
- OS << " return false;\n}\n";
+ std::stable_sort(Rules.begin(), Rules.end(),
+ [&](const RuleMatcher &A, const RuleMatcher &B) {
+ if (A.isHigherPriorityThan(B)) {
+ assert(!B.isHigherPriorityThan(A) && "Cannot be more important "
+ "and less important at "
+ "the same time");
+ return true;
+ }
+ return false;
+ });
+
+ std::vector<Record *> ComplexPredicates =
+ RK.getAllDerivedDefinitions("GIComplexOperandMatcher");
+ std::sort(ComplexPredicates.begin(), ComplexPredicates.end(),
+ [](const Record *A, const Record *B) {
+ if (A->getName() < B->getName())
+ return true;
+ return false;
+ });
+ unsigned MaxTemporaries = 0;
+ for (const auto &Rule : Rules)
+ MaxTemporaries = std::max(MaxTemporaries, Rule.countRendererFns());
+
+ OS << "#ifdef GET_GLOBALISEL_PREDICATE_BITSET\n"
+ << "const unsigned MAX_SUBTARGET_PREDICATES = " << SubtargetFeatures.size()
+ << ";\n"
+ << "using PredicateBitset = "
+ "llvm::PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;\n"
+ << "#endif // ifdef GET_GLOBALISEL_PREDICATE_BITSET\n\n";
+
+ OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n"
+ << " mutable MatcherState State;\n"
+ << " typedef "
+ "ComplexRendererFn("
+ << Target.getName()
+ << "InstructionSelector::*ComplexMatcherMemFn)(MachineOperand &) const;\n"
+ << "const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> "
+ "MatcherInfo;\n"
+ << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";
+
+ OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n"
+ << ", State(" << MaxTemporaries << "),\n"
+ << "MatcherInfo({TypeObjects, FeatureBitsets, {\n"
+ << " nullptr, // GICP_Invalid\n";
+ for (const auto &Record : ComplexPredicates)
+ OS << " &" << Target.getName()
+ << "InstructionSelector::" << Record->getValueAsString("MatcherFn")
+ << ", // " << Record->getName() << "\n";
+ OS << "}})\n"
+ << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n\n";
+
+ OS << "#ifdef GET_GLOBALISEL_IMPL\n";
+ SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(SubtargetFeatures,
+ OS);
+
+ // Separate subtarget features by how often they must be recomputed.
+ SubtargetFeatureInfoMap ModuleFeatures;
+ std::copy_if(SubtargetFeatures.begin(), SubtargetFeatures.end(),
+ std::inserter(ModuleFeatures, ModuleFeatures.end()),
+ [](const SubtargetFeatureInfoMap::value_type &X) {
+ return !X.second.mustRecomputePerFunction();
+ });
+ SubtargetFeatureInfoMap FunctionFeatures;
+ std::copy_if(SubtargetFeatures.begin(), SubtargetFeatures.end(),
+ std::inserter(FunctionFeatures, FunctionFeatures.end()),
+ [](const SubtargetFeatureInfoMap::value_type &X) {
+ return X.second.mustRecomputePerFunction();
+ });
+
+ SubtargetFeatureInfo::emitComputeAvailableFeatures(
+ Target.getName(), "InstructionSelector", "computeAvailableModuleFeatures",
+ ModuleFeatures, OS);
+ SubtargetFeatureInfo::emitComputeAvailableFeatures(
+ Target.getName(), "InstructionSelector",
+ "computeAvailableFunctionFeatures", FunctionFeatures, OS,
+ "const MachineFunction *MF");
+
+ // Emit a table containing the LLT objects needed by the matcher and an enum
+ // for the matcher to reference them with.
+ std::vector<LLTCodeGen> TypeObjects = {
+ LLT::scalar(8), LLT::scalar(16), LLT::scalar(32),
+ LLT::scalar(64), LLT::scalar(80), LLT::vector(8, 1),
+ LLT::vector(16, 1), LLT::vector(32, 1), LLT::vector(64, 1),
+ LLT::vector(8, 8), LLT::vector(16, 8), LLT::vector(32, 8),
+ LLT::vector(64, 8), LLT::vector(4, 16), LLT::vector(8, 16),
+ LLT::vector(16, 16), LLT::vector(32, 16), LLT::vector(2, 32),
+ LLT::vector(4, 32), LLT::vector(8, 32), LLT::vector(16, 32),
+ LLT::vector(2, 64), LLT::vector(4, 64), LLT::vector(8, 64),
+ };
+ std::sort(TypeObjects.begin(), TypeObjects.end());
+ OS << "enum {\n";
+ for (const auto &TypeObject : TypeObjects) {
+ OS << " ";
+ TypeObject.emitCxxEnumValue(OS);
+ OS << ",\n";
+ }
+ OS << "};\n"
+ << "const static LLT TypeObjects[] = {\n";
+ for (const auto &TypeObject : TypeObjects) {
+ OS << " ";
+ TypeObject.emitCxxConstructorCall(OS);
+ OS << ",\n";
+ }
+ OS << "};\n\n";
+
+ // Emit a table containing the PredicateBitsets objects needed by the matcher
+ // and an enum for the matcher to reference them with.
+ std::vector<std::vector<Record *>> FeatureBitsets;
+ for (auto &Rule : Rules)
+ FeatureBitsets.push_back(Rule.getRequiredFeatures());
+ std::sort(
+ FeatureBitsets.begin(), FeatureBitsets.end(),
+ [&](const std::vector<Record *> &A, const std::vector<Record *> &B) {
+ if (A.size() < B.size())
+ return true;
+ if (A.size() > B.size())
+ return false;
+ for (const auto &Pair : zip(A, B)) {
+ if (std::get<0>(Pair)->getName() < std::get<1>(Pair)->getName())
+ return true;
+ if (std::get<0>(Pair)->getName() > std::get<1>(Pair)->getName())
+ return false;
+ }
+ return false;
+ });
+ FeatureBitsets.erase(
+ std::unique(FeatureBitsets.begin(), FeatureBitsets.end()),
+ FeatureBitsets.end());
+ OS << "enum {\n"
+ << " GIFBS_Invalid,\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " " << getNameForFeatureBitset(FeatureBitset) << ",\n";
+ }
+ OS << "};\n"
+ << "const static PredicateBitset FeatureBitsets[] {\n"
+ << " {}, // GIFBS_Invalid\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " {";
+ for (const auto &Feature : FeatureBitset) {
+ const auto &I = SubtargetFeatures.find(Feature);
+ assert(I != SubtargetFeatures.end() && "Didn't import predicate?");
+ OS << I->second.getEnumBitName() << ", ";
+ }
+ OS << "},\n";
+ }
+ OS << "};\n\n";
+
+ // Emit complex predicate table and an enum to reference them with.
+ OS << "enum {\n"
+ << " GICP_Invalid,\n";
+ for (const auto &Record : ComplexPredicates)
+ OS << " GICP_" << Record->getName() << ",\n";
+ OS << "};\n"
+ << "// See constructor for table contents\n\n";
+
+ OS << "bool " << Target.getName()
+ << "InstructionSelector::selectImpl(MachineInstr &I) const {\n"
+ << " MachineFunction &MF = *I.getParent()->getParent();\n"
+ << " MachineRegisterInfo &MRI = MF.getRegInfo();\n"
+ << " // FIXME: This should be computed on a per-function basis rather "
+ "than per-insn.\n"
+ << " AvailableFunctionFeatures = computeAvailableFunctionFeatures(&STI, "
+ "&MF);\n"
+ << " const PredicateBitset AvailableFeatures = getAvailableFeatures();\n"
+ << " NewMIVector OutMIs;\n"
+ << " State.MIs.clear();\n"
+ << " State.MIs.push_back(&I);\n\n";
+
+ for (auto &Rule : Rules) {
+ Rule.emit(OS);
+ ++CurrentMatchTableID;
+ ++NumPatternEmitted;
+ assert(CurrentMatchTableID == NumPatternEmitted &&
+ "Statistic deviates from number of emitted tables");
+ }
+
+ OS << " return false;\n"
+ << "}\n"
+ << "#endif // ifdef GET_GLOBALISEL_IMPL\n";
+
+ OS << "#ifdef GET_GLOBALISEL_PREDICATES_DECL\n"
+ << "PredicateBitset AvailableModuleFeatures;\n"
+ << "mutable PredicateBitset AvailableFunctionFeatures;\n"
+ << "PredicateBitset getAvailableFeatures() const {\n"
+ << " return AvailableModuleFeatures | AvailableFunctionFeatures;\n"
+ << "}\n"
+ << "PredicateBitset\n"
+ << "computeAvailableModuleFeatures(const " << Target.getName()
+ << "Subtarget *Subtarget) const;\n"
+ << "PredicateBitset\n"
+ << "computeAvailableFunctionFeatures(const " << Target.getName()
+ << "Subtarget *Subtarget,\n"
+ << " const MachineFunction *MF) const;\n"
+ << "#endif // ifdef GET_GLOBALISEL_PREDICATES_DECL\n";
+
+ OS << "#ifdef GET_GLOBALISEL_PREDICATES_INIT\n"
+ << "AvailableModuleFeatures(computeAvailableModuleFeatures(&STI)),\n"
+ << "AvailableFunctionFeatures()\n"
+ << "#endif // ifdef GET_GLOBALISEL_PREDICATES_INIT\n";
}
+void GlobalISelEmitter::declareSubtargetFeature(Record *Predicate) {
+ if (SubtargetFeatures.count(Predicate) == 0)
+ SubtargetFeatures.emplace(
+ Predicate, SubtargetFeatureInfo(Predicate, SubtargetFeatures.size()));
+}
+
+} // end anonymous namespace
+
//===----------------------------------------------------------------------===//
namespace llvm {
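The GET_GLOBALISEL_IMPL, GET_GLOBALISEL_PREDICATES_DECL and GET_GLOBALISEL_PREDICATES_INIT guards emitted above are meant to be spliced into a target's InstructionSelector subclass. A minimal sketch of how a hypothetical MyTarget selector could consume the generated fragments (the class, header and subtarget names are illustrative, and PredicateBitset is assumed to be defined by an earlier generated fragment):

class MyTargetInstructionSelector : public InstructionSelector {
public:
  MyTargetInstructionSelector(const MyTargetSubtarget &STI);
  bool select(MachineInstr &I) const { return selectImpl(I); }

private:
  bool selectImpl(MachineInstr &I) const;
  const MyTargetSubtarget &STI;
  // Declares AvailableModuleFeatures/AvailableFunctionFeatures and the
  // computeAvailable*Features() helpers emitted above.
#define GET_GLOBALISEL_PREDICATES_DECL
#include "MyTargetGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL
};

MyTargetInstructionSelector::MyTargetInstructionSelector(
    const MyTargetSubtarget &STI)
    : InstructionSelector(), STI(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "MyTargetGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
{
}

// Pulls in selectImpl() and the computeAvailable*Features() definitions.
#define GET_GLOBALISEL_IMPL
#include "MyTargetGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL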
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
index ab7d964..e270a17 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -67,7 +67,7 @@ private:
void emitOperandTypesEnum(raw_ostream &OS, const CodeGenTarget &Target);
void initOperandMapData(
ArrayRef<const CodeGenInstruction *> NumberedInstructions,
- const std::string &Namespace,
+ StringRef Namespace,
std::map<std::string, unsigned> &Operands,
OpNameMapTy &OperandMap);
void emitOperandNameMappings(raw_ostream &OS, const CodeGenTarget &Target,
@@ -207,7 +207,7 @@ void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
/// well as the getNamedOperandIdx() function.
void InstrInfoEmitter::initOperandMapData(
ArrayRef<const CodeGenInstruction *> NumberedInstructions,
- const std::string &Namespace,
+ StringRef Namespace,
std::map<std::string, unsigned> &Operands,
OpNameMapTy &OperandMap) {
unsigned NumOperands = 0;
@@ -224,7 +224,7 @@ void InstrInfoEmitter::initOperandMapData(
}
OpList[I->second] = Info.MIOperandNo;
}
- OperandMap[OpList].push_back(Namespace + "::" +
+ OperandMap[OpList].push_back(Namespace.str() + "::" +
Inst->TheDef->getName().str());
}
}
@@ -243,7 +243,7 @@ void InstrInfoEmitter::initOperandMapData(
void InstrInfoEmitter::emitOperandNameMappings(raw_ostream &OS,
const CodeGenTarget &Target,
ArrayRef<const CodeGenInstruction*> NumberedInstructions) {
- const std::string &Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
std::string OpNameNS = "OpName";
// Map of operand names to their enumeration value. This will be used to
// generate the OpName enum.
@@ -315,7 +315,7 @@ void InstrInfoEmitter::emitOperandNameMappings(raw_ostream &OS,
void InstrInfoEmitter::emitOperandTypesEnum(raw_ostream &OS,
const CodeGenTarget &Target) {
- const std::string &Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
std::vector<Record *> Operands = Records.getAllDerivedDefinitions("Operand");
OS << "#ifdef GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
@@ -576,7 +576,7 @@ void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
CodeGenTarget Target(Records);
// We must emit the PHI opcode first...
- std::string Namespace = Target.getInstNamespace();
+ StringRef Namespace = Target.getInstNamespace();
if (Namespace.empty())
PrintFatalError("No instructions defined!");
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
index 33256cc..caa52d2 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -84,14 +84,11 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
// Emit the intrinsic parameter attributes.
EmitAttributes(Ints, OS);
- // Individual targets don't need GCC builtin name mappings.
- if (!TargetOnly) {
- // Emit code to translate GCC builtins into LLVM intrinsics.
- EmitIntrinsicToBuiltinMap(Ints, true, OS);
+ // Emit code to translate GCC builtins into LLVM intrinsics.
+ EmitIntrinsicToBuiltinMap(Ints, true, OS);
- // Emit code to translate MS builtins into LLVM intrinsics.
- EmitIntrinsicToBuiltinMap(Ints, false, OS);
- }
+ // Emit code to translate MS builtins into LLVM intrinsics.
+ EmitIntrinsicToBuiltinMap(Ints, false, OS);
EmitSuffix(OS);
}
@@ -133,14 +130,14 @@ void IntrinsicEmitter::EmitTargetInfo(const CodeGenIntrinsicTable &Ints,
OS << "// Target mapping\n";
OS << "#ifdef GET_INTRINSIC_TARGET_DATA\n";
OS << "struct IntrinsicTargetInfo {\n"
- << " StringRef Name;\n"
+ << " llvm::StringLiteral Name;\n"
<< " size_t Offset;\n"
<< " size_t Count;\n"
<< "};\n";
- OS << "static const IntrinsicTargetInfo TargetInfos[] = {\n";
+ OS << "static constexpr IntrinsicTargetInfo TargetInfos[] = {\n";
for (auto Target : Ints.Targets)
- OS << " {\"" << Target.Name << "\", " << Target.Offset << ", "
- << Target.Count << "},\n";
+ OS << " {llvm::StringLiteral(\"" << Target.Name << "\"), " << Target.Offset
+ << ", " << Target.Count << "},\n";
OS << "};\n";
OS << "#endif\n\n";
}
@@ -214,13 +211,12 @@ enum IIT_Info {
IIT_SAME_VEC_WIDTH_ARG = 31,
IIT_PTR_TO_ARG = 32,
IIT_PTR_TO_ELT = 33,
- IIT_VEC_OF_PTRS_TO_ELT = 34,
+ IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
IIT_I128 = 35,
IIT_V512 = 36,
IIT_V1024 = 37
};
-
static void EncodeFixedValueType(MVT::SimpleValueType VT,
std::vector<unsigned char> &Sig) {
if (MVT(VT).isInteger()) {
@@ -276,9 +272,16 @@ static void EncodeFixedType(Record *R, std::vector<unsigned char> &ArgCodes,
}
else if (R->isSubClassOf("LLVMPointerTo"))
Sig.push_back(IIT_PTR_TO_ARG);
- else if (R->isSubClassOf("LLVMVectorOfPointersToElt"))
- Sig.push_back(IIT_VEC_OF_PTRS_TO_ELT);
- else if (R->isSubClassOf("LLVMPointerToElt"))
+ else if (R->isSubClassOf("LLVMVectorOfAnyPointersToElt")) {
+ Sig.push_back(IIT_VEC_OF_ANYPTRS_TO_ELT);
+ unsigned ArgNo = ArgCodes.size();
+ ArgCodes.push_back(3 /*vAny*/);
+ // Encode overloaded ArgNo
+ Sig.push_back(ArgNo);
+ // Encode LLVMMatchType<Number> ArgNo
+ Sig.push_back(Number);
+ return;
+ } else if (R->isSubClassOf("LLVMPointerToElt"))
Sig.push_back(IIT_PTR_TO_ELT);
else
Sig.push_back(IIT_ARG);
@@ -479,6 +482,12 @@ struct AttributeComparator {
if (L->isConvergent != R->isConvergent)
return R->isConvergent;
+ if (L->isSpeculatable != R->isSpeculatable)
+ return R->isSpeculatable;
+
+ if (L->hasSideEffects != R->hasSideEffects)
+ return R->hasSideEffects;
+
// Try to order by readonly/readnone attribute.
CodeGenIntrinsic::ModRefBehavior LK = L->ModRef;
CodeGenIntrinsic::ModRefBehavior RK = R->ModRef;
@@ -497,10 +506,10 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
OS << "// Add parameter attributes that are not common to all intrinsics.\n";
OS << "#ifdef GET_INTRINSIC_ATTRIBUTES\n";
if (TargetOnly)
- OS << "static AttributeSet getAttributes(LLVMContext &C, " << TargetPrefix
+ OS << "static AttributeList getAttributes(LLVMContext &C, " << TargetPrefix
<< "Intrinsic::ID id) {\n";
else
- OS << "AttributeSet Intrinsic::getAttributes(LLVMContext &C, ID id) {\n";
+ OS << "AttributeList Intrinsic::getAttributes(LLVMContext &C, ID id) {\n";
// Compute the maximum number of attribute arguments and the map
typedef std::map<const CodeGenIntrinsic*, unsigned,
@@ -518,7 +527,7 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
N = ++AttrNum;
}
- // Emit an array of AttributeSet. Most intrinsics will have at least one
+ // Emit an array of AttributeList. Most intrinsics will have at least one
// entry, for the function itself (index ~1), which is usually nounwind.
OS << " static const uint8_t IntrinsicsToAttributesMap[] = {\n";
@@ -530,7 +539,7 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
}
OS << " };\n\n";
- OS << " AttributeSet AS[" << maxArgAttrs+1 << "];\n";
+ OS << " AttributeList AS[" << maxArgAttrs + 1 << "];\n";
OS << " unsigned NumAttrs = 0;\n";
OS << " if (id != 0) {\n";
OS << " switch(IntrinsicsToAttributesMap[id - ";
@@ -554,8 +563,9 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
if (ae) {
while (ai != ae) {
unsigned argNo = intrinsic.ArgumentAttributes[ai].first;
+ unsigned attrIdx = argNo + 1; // Must match AttributeList::FirstArgIndex
- OS << " const Attribute::AttrKind AttrParam" << argNo + 1 <<"[]= {";
+ OS << " const Attribute::AttrKind AttrParam" << attrIdx << "[]= {";
bool addComma = false;
do {
@@ -595,15 +605,15 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
++ai;
} while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo);
OS << "};\n";
- OS << " AS[" << numAttrs++ << "] = AttributeSet::get(C, "
- << argNo+1 << ", AttrParam" << argNo +1 << ");\n";
+ OS << " AS[" << numAttrs++ << "] = AttributeList::get(C, "
+ << attrIdx << ", AttrParam" << attrIdx << ");\n";
}
}
if (!intrinsic.canThrow ||
intrinsic.ModRef != CodeGenIntrinsic::ReadWriteMem ||
intrinsic.isNoReturn || intrinsic.isNoDuplicate ||
- intrinsic.isConvergent) {
+ intrinsic.isConvergent || intrinsic.isSpeculatable) {
OS << " const Attribute::AttrKind Atts[] = {";
bool addComma = false;
if (!intrinsic.canThrow) {
@@ -628,6 +638,12 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
OS << "Attribute::Convergent";
addComma = true;
}
+ if (intrinsic.isSpeculatable) {
+ if (addComma)
+ OS << ",";
+ OS << "Attribute::Speculatable";
+ addComma = true;
+ }
switch (intrinsic.ModRef) {
case CodeGenIntrinsic::NoMem:
@@ -699,8 +715,8 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
break;
}
OS << "};\n";
- OS << " AS[" << numAttrs++ << "] = AttributeSet::get(C, "
- << "AttributeSet::FunctionIndex, Atts);\n";
+ OS << " AS[" << numAttrs++ << "] = AttributeList::get(C, "
+ << "AttributeList::FunctionIndex, Atts);\n";
}
if (numAttrs) {
@@ -708,14 +724,14 @@ void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
OS << " break;\n";
OS << " }\n";
} else {
- OS << " return AttributeSet();\n";
+ OS << " return AttributeList();\n";
OS << " }\n";
}
}
OS << " }\n";
OS << " }\n";
- OS << " return AttributeSet::get(C, makeArrayRef(AS, NumAttrs));\n";
+ OS << " return AttributeList::get(C, makeArrayRef(AS, NumAttrs));\n";
OS << "}\n";
OS << "#endif // GET_INTRINSIC_ATTRIBUTES\n\n";
}
@@ -756,6 +772,17 @@ void IntrinsicEmitter::EmitIntrinsicToBuiltinMap(
<< "Builtin(const char "
<< "*TargetPrefixStr, StringRef BuiltinNameStr) {\n";
}
+
+ if (Table.Empty()) {
+ OS << " return ";
+ if (!TargetPrefix.empty())
+ OS << "(" << TargetPrefix << "Intrinsic::ID)";
+ OS << "Intrinsic::not_intrinsic;\n";
+ OS << "}\n";
+ OS << "#endif\n\n";
+ return;
+ }
+
OS << " static const char BuiltinNames[] = {\n";
Table.EmitCharArray(OS);
OS << " };\n\n";
diff --git a/contrib/llvm/utils/TableGen/OptParserEmitter.cpp b/contrib/llvm/utils/TableGen/OptParserEmitter.cpp
index c1b5e65..e3777d0 100644
--- a/contrib/llvm/utils/TableGen/OptParserEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/OptParserEmitter.cpp
@@ -21,6 +21,8 @@ using namespace llvm;
// Ordering on Info. The logic should match with the consumer-side function in
// llvm/Option/OptTable.h.
+// FIXME: Make this take StringRefs instead of null-terminated strings to
+// simplify callers.
static int StrCmpOptionName(const char *A, const char *B) {
const char *X = A, *Y = B;
char a = tolower(*A), b = tolower(*B);
@@ -53,22 +55,22 @@ static int CompareOptionRecords(Record *const *Av, Record *const *Bv) {
// Compare options by name, unless they are sentinels.
if (!ASent)
- if (int Cmp = StrCmpOptionName(A->getValueAsString("Name").c_str(),
- B->getValueAsString("Name").c_str()))
+ if (int Cmp = StrCmpOptionName(A->getValueAsString("Name").str().c_str(),
+ B->getValueAsString("Name").str().c_str()))
return Cmp;
if (!ASent) {
- std::vector<std::string> APrefixes = A->getValueAsListOfStrings("Prefixes");
- std::vector<std::string> BPrefixes = B->getValueAsListOfStrings("Prefixes");
-
- for (std::vector<std::string>::const_iterator APre = APrefixes.begin(),
- AEPre = APrefixes.end(),
- BPre = BPrefixes.begin(),
- BEPre = BPrefixes.end();
- APre != AEPre &&
- BPre != BEPre;
- ++APre, ++BPre) {
- if (int Cmp = StrCmpOptionName(APre->c_str(), BPre->c_str()))
+ std::vector<StringRef> APrefixes = A->getValueAsListOfStrings("Prefixes");
+ std::vector<StringRef> BPrefixes = B->getValueAsListOfStrings("Prefixes");
+
+ for (std::vector<StringRef>::const_iterator APre = APrefixes.begin(),
+ AEPre = APrefixes.end(),
+ BPre = BPrefixes.begin(),
+ BEPre = BPrefixes.end();
+ APre != AEPre &&
+ BPre != BEPre;
+ ++APre, ++BPre) {
+ if (int Cmp = StrCmpOptionName(APre->str().c_str(), BPre->str().c_str()))
return Cmp;
}
}
@@ -122,7 +124,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
unsigned CurPrefix = 0;
for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
const Record &R = *Opts[i];
- std::vector<std::string> prf = R.getValueAsListOfStrings("Prefixes");
+ std::vector<StringRef> prf = R.getValueAsListOfStrings("Prefixes");
PrefixKeyT prfkey(prf.begin(), prf.end());
unsigned NewPrefix = CurPrefix + 1;
if (Prefixes.insert(std::make_pair(prfkey, (Twine("prefix_") +
@@ -194,6 +196,9 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
OS << ", nullptr";
// The option meta-variable name (unused).
+ OS << ", nullptr";
+
+ // The option Values (unused for groups).
OS << ", nullptr)\n";
}
OS << "\n";
@@ -207,7 +212,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
OS << "OPTION(";
// The option prefix;
- std::vector<std::string> prf = R.getValueAsListOfStrings("Prefixes");
+ std::vector<StringRef> prf = R.getValueAsListOfStrings("Prefixes");
OS << Prefixes[PrefixKeyT(prf.begin(), prf.end())] << ", ";
// The option string.
@@ -240,7 +245,7 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
// would become "foo\0bar\0". Note that the compiler adds an implicit
// terminating \0 at the end.
OS << ", ";
- std::vector<std::string> AliasArgs = R.getValueAsListOfStrings("AliasArgs");
+ std::vector<StringRef> AliasArgs = R.getValueAsListOfStrings("AliasArgs");
if (AliasArgs.size() == 0) {
OS << "nullptr";
} else {
@@ -283,6 +288,13 @@ void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
else
OS << "nullptr";
+ // The option Values. Used for shell autocompletion.
+ OS << ", ";
+ if (!isa<UnsetInit>(R.getValueInit("Values")))
+ write_cstring(OS, R.getValueAsString("Values"));
+ else
+ OS << "nullptr";
+
OS << ")\n";
}
OS << "#endif // OPTION\n";
diff --git a/contrib/llvm/utils/TableGen/RegisterBankEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterBankEmitter.cpp
new file mode 100644
index 0000000..880d075
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/RegisterBankEmitter.cpp
@@ -0,0 +1,320 @@
+//===- RegisterBankEmitter.cpp - Generate a Register Bank Desc. -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting a description of a target
+// register bank for a code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+#include "CodeGenRegisters.h"
+
+#define DEBUG_TYPE "register-bank-emitter"
+
+using namespace llvm;
+
+namespace {
+class RegisterBank {
+
+ /// A vector of register classes that are included in the register bank.
+ typedef std::vector<const CodeGenRegisterClass *> RegisterClassesTy;
+
+private:
+ const Record &TheDef;
+
+ /// The register classes that are covered by the register bank.
+ RegisterClassesTy RCs;
+
+ /// The register class with the largest register size.
+ const CodeGenRegisterClass *RCWithLargestRegsSize;
+
+public:
+ RegisterBank(const Record &TheDef)
+ : TheDef(TheDef), RCs(), RCWithLargestRegsSize(nullptr) {}
+
+ /// Get the human-readable name for the bank.
+ StringRef getName() const { return TheDef.getValueAsString("Name"); }
+ /// Get the name of the enumerator in the ID enumeration.
+ std::string getEnumeratorName() const { return (TheDef.getName() + "ID").str(); }
+
+  /// Get the name of the array holding the register class coverage data.
+ std::string getCoverageArrayName() const {
+ return (TheDef.getName() + "CoverageData").str();
+ }
+
+ /// Get the name of the global instance variable.
+ StringRef getInstanceVarName() const { return TheDef.getName(); }
+
+ const Record &getDef() const { return TheDef; }
+
+ /// Get the register classes listed in the RegisterBank.RegisterClasses field.
+ std::vector<const CodeGenRegisterClass *>
+ getExplictlySpecifiedRegisterClasses(
+ CodeGenRegBank &RegisterClassHierarchy) const {
+ std::vector<const CodeGenRegisterClass *> RCs;
+ for (const auto &RCDef : getDef().getValueAsListOfDefs("RegisterClasses"))
+ RCs.push_back(RegisterClassHierarchy.getRegClass(RCDef));
+ return RCs;
+ }
+
+ /// Add a register class to the bank without duplicates.
+ void addRegisterClass(const CodeGenRegisterClass *RC) {
+ if (std::find_if(RCs.begin(), RCs.end(),
+ [&RC](const CodeGenRegisterClass *X) {
+ return X == RC;
+ }) != RCs.end())
+ return;
+
+ // FIXME? We really want the register size rather than the spill size
+ // since the spill size may be bigger on some targets with
+ // limited load/store instructions. However, we don't store the
+ // register size anywhere (we could sum the sizes of the subregisters
+ // but there may be additional bits too) and we can't derive it from
+ // the VT's reliably due to Untyped.
+ if (RCWithLargestRegsSize == nullptr)
+ RCWithLargestRegsSize = RC;
+ else if (RCWithLargestRegsSize->SpillSize < RC->SpillSize)
+ RCWithLargestRegsSize = RC;
+ assert(RCWithLargestRegsSize && "RC was nullptr?");
+
+ RCs.emplace_back(RC);
+ }
+
+ const CodeGenRegisterClass *getRCWithLargestRegsSize() const {
+ return RCWithLargestRegsSize;
+ }
+
+ iterator_range<typename RegisterClassesTy::const_iterator>
+ register_classes() const {
+ return llvm::make_range(RCs.begin(), RCs.end());
+ }
+};
+
+class RegisterBankEmitter {
+private:
+ RecordKeeper &Records;
+ CodeGenRegBank RegisterClassHierarchy;
+
+ void emitHeader(raw_ostream &OS, const StringRef TargetName,
+ const std::vector<RegisterBank> &Banks);
+ void emitBaseClassDefinition(raw_ostream &OS, const StringRef TargetName,
+ const std::vector<RegisterBank> &Banks);
+ void emitBaseClassImplementation(raw_ostream &OS, const StringRef TargetName,
+ std::vector<RegisterBank> &Banks);
+
+public:
+ RegisterBankEmitter(RecordKeeper &R)
+ : Records(R), RegisterClassHierarchy(Records) {}
+
+ void run(raw_ostream &OS);
+};
+
+} // end anonymous namespace
+
+/// Emit code to declare the ID enumeration and external global instance
+/// variables.
+void RegisterBankEmitter::emitHeader(raw_ostream &OS,
+ const StringRef TargetName,
+ const std::vector<RegisterBank> &Banks) {
+ // <Target>RegisterBankInfo.h
+ OS << "namespace llvm {\n"
+ << "namespace " << TargetName << " {\n"
+ << "enum {\n";
+ for (const auto &Bank : Banks)
+ OS << " " << Bank.getEnumeratorName() << ",\n";
+ OS << " NumRegisterBanks,\n"
+ << "};\n"
+ << "} // end namespace " << TargetName << "\n"
+ << "} // end namespace llvm\n";
+}
+
+/// Emit declarations of the <Target>GenRegisterBankInfo class.
+void RegisterBankEmitter::emitBaseClassDefinition(
+ raw_ostream &OS, const StringRef TargetName,
+ const std::vector<RegisterBank> &Banks) {
+ OS << "private:\n"
+ << " static RegisterBank *RegBanks[];\n\n"
+ << "protected:\n"
+ << " " << TargetName << "GenRegisterBankInfo();\n"
+ << "\n";
+}
+
+/// Visit each register class belonging to the given register bank.
+///
+/// A class belongs to the bank iff any of these apply:
+/// * It is explicitly specified
+/// * It is a subclass of a class that is a member.
+/// * It is a class containing subregisters of the registers of a class that
+/// is a member. This is known as a subreg-class.
+///
+/// This function must be called for each explicitly specified register class.
+///
+/// \param RC The register class to search.
+/// \param Kind A debug string containing the path the visitor took to reach RC.
+/// \param VisitFn The action to take for each class visited. It may be called
+/// multiple times for a given class if there are multiple paths
+/// to the class.
+static void visitRegisterBankClasses(
+ CodeGenRegBank &RegisterClassHierarchy, const CodeGenRegisterClass *RC,
+ const Twine Kind,
+ std::function<void(const CodeGenRegisterClass *, StringRef)> VisitFn,
+ SmallPtrSetImpl<const CodeGenRegisterClass *> &VisitedRCs) {
+
+ // Make sure we only visit each class once to avoid infinite loops.
+ if (VisitedRCs.count(RC))
+ return;
+ VisitedRCs.insert(RC);
+
+ // Visit each explicitly named class.
+ VisitFn(RC, Kind.str());
+
+ for (const auto &PossibleSubclass : RegisterClassHierarchy.getRegClasses()) {
+ std::string TmpKind =
+ (Twine(Kind) + " (" + PossibleSubclass.getName() + ")").str();
+
+ // Visit each subclass of an explicitly named class.
+ if (RC != &PossibleSubclass && RC->hasSubClass(&PossibleSubclass))
+ visitRegisterBankClasses(RegisterClassHierarchy, &PossibleSubclass,
+ TmpKind + " " + RC->getName() + " subclass",
+ VisitFn, VisitedRCs);
+
+ // Visit each class that contains only subregisters of RC with a common
+ // subregister-index.
+ //
+ // More precisely, PossibleSubclass is a subreg-class iff Reg:SubIdx is in
+ // PossibleSubclass for all registers Reg from RC using any
+    // subregister-index SubIdx.
+ for (const auto &SubIdx : RegisterClassHierarchy.getSubRegIndices()) {
+ BitVector BV(RegisterClassHierarchy.getRegClasses().size());
+ PossibleSubclass.getSuperRegClasses(&SubIdx, BV);
+ if (BV.test(RC->EnumValue)) {
+ std::string TmpKind2 = (Twine(TmpKind) + " " + RC->getName() +
+ " class-with-subregs: " + RC->getName())
+ .str();
+ VisitFn(&PossibleSubclass, TmpKind2);
+ }
+ }
+ }
+}
+
+void RegisterBankEmitter::emitBaseClassImplementation(
+ raw_ostream &OS, StringRef TargetName,
+ std::vector<RegisterBank> &Banks) {
+
+ OS << "namespace llvm {\n"
+ << "namespace " << TargetName << " {\n";
+ for (const auto &Bank : Banks) {
+ std::vector<std::vector<const CodeGenRegisterClass *>> RCsGroupedByWord(
+ (RegisterClassHierarchy.getRegClasses().size() + 31) / 32);
+
+ for (const auto &RC : Bank.register_classes())
+ RCsGroupedByWord[RC->EnumValue / 32].push_back(RC);
+
+ OS << "const uint32_t " << Bank.getCoverageArrayName() << "[] = {\n";
+ unsigned LowestIdxInWord = 0;
+ for (const auto &RCs : RCsGroupedByWord) {
+ OS << " // " << LowestIdxInWord << "-" << (LowestIdxInWord + 31) << "\n";
+ for (const auto &RC : RCs) {
+ std::string QualifiedRegClassID =
+ (Twine(RC->Namespace) + "::" + RC->getName() + "RegClassID").str();
+ OS << " (1u << (" << QualifiedRegClassID << " - "
+ << LowestIdxInWord << ")) |\n";
+ }
+ OS << " 0,\n";
+ LowestIdxInWord += 32;
+ }
+ OS << "};\n";
+ }
+ OS << "\n";
+
+ for (const auto &Bank : Banks) {
+ std::string QualifiedBankID =
+ (TargetName + "::" + Bank.getEnumeratorName()).str();
+ unsigned Size = Bank.getRCWithLargestRegsSize()->SpillSize;
+ OS << "RegisterBank " << Bank.getInstanceVarName() << "(/* ID */ "
+ << QualifiedBankID << ", /* Name */ \"" << Bank.getName()
+ << "\", /* Size */ " << Size << ", "
+ << "/* CoveredRegClasses */ " << Bank.getCoverageArrayName()
+ << ", /* NumRegClasses */ "
+ << RegisterClassHierarchy.getRegClasses().size() << ");\n";
+ }
+ OS << "} // end namespace " << TargetName << "\n"
+ << "\n";
+
+ OS << "RegisterBank *" << TargetName
+ << "GenRegisterBankInfo::RegBanks[] = {\n";
+ for (const auto &Bank : Banks)
+ OS << " &" << TargetName << "::" << Bank.getInstanceVarName() << ",\n";
+ OS << "};\n\n";
+
+ OS << TargetName << "GenRegisterBankInfo::" << TargetName
+ << "GenRegisterBankInfo()\n"
+ << " : RegisterBankInfo(RegBanks, " << TargetName
+ << "::NumRegisterBanks) {\n"
+ << " // Assert that RegBank indices match their ID's\n"
+ << "#ifndef NDEBUG\n"
+ << " unsigned Index = 0;\n"
+ << " for (const auto &RB : RegBanks)\n"
+ << " assert(Index++ == RB->getID() && \"Index != ID\");\n"
+ << "#endif // NDEBUG\n"
+ << "}\n"
+ << "} // end namespace llvm\n";
+}
+
+void RegisterBankEmitter::run(raw_ostream &OS) {
+ std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
+ if (Targets.size() != 1)
+ PrintFatalError("ERROR: Too many or too few subclasses of Target defined!");
+ StringRef TargetName = Targets[0]->getName();
+
+ std::vector<RegisterBank> Banks;
+ for (const auto &V : Records.getAllDerivedDefinitions("RegisterBank")) {
+ SmallPtrSet<const CodeGenRegisterClass *, 8> VisitedRCs;
+ RegisterBank Bank(*V);
+
+ for (const CodeGenRegisterClass *RC :
+ Bank.getExplictlySpecifiedRegisterClasses(RegisterClassHierarchy)) {
+ visitRegisterBankClasses(
+ RegisterClassHierarchy, RC, "explicit",
+ [&Bank](const CodeGenRegisterClass *RC, StringRef Kind) {
+ DEBUG(dbgs() << "Added " << RC->getName() << "(" << Kind << ")\n");
+ Bank.addRegisterClass(RC);
+ }, VisitedRCs);
+ }
+
+ Banks.push_back(Bank);
+ }
+
+ emitSourceFileHeader("Register Bank Source Fragments", OS);
+ OS << "#ifdef GET_REGBANK_DECLARATIONS\n"
+ << "#undef GET_REGBANK_DECLARATIONS\n";
+ emitHeader(OS, TargetName, Banks);
+ OS << "#endif // GET_REGBANK_DECLARATIONS\n\n"
+ << "#ifdef GET_TARGET_REGBANK_CLASS\n"
+ << "#undef GET_TARGET_REGBANK_CLASS\n";
+ emitBaseClassDefinition(OS, TargetName, Banks);
+ OS << "#endif // GET_TARGET_REGBANK_CLASS\n\n"
+ << "#ifdef GET_TARGET_REGBANK_IMPL\n"
+ << "#undef GET_TARGET_REGBANK_IMPL\n";
+ emitBaseClassImplementation(OS, TargetName, Banks);
+ OS << "#endif // GET_TARGET_REGBANK_IMPL\n";
+}
+
+namespace llvm {
+
+void EmitRegisterBank(RecordKeeper &RK, raw_ostream &OS) {
+ RegisterBankEmitter(RK).run(OS);
+}
+
+} // end namespace llvm
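To make the generated fragments concrete, for a hypothetical MyTarget with one RegisterBank record named GPRRegBank (Name = "GPR") covering two register classes, run() would emit roughly the following (the class IDs, names and size are illustrative; the RegBanks array and the GenRegisterBankInfo constructor are omitted):

#ifdef GET_REGBANK_DECLARATIONS
namespace llvm {
namespace MyTarget {
enum {
  GPRRegBankID,
  NumRegisterBanks,
};
} // end namespace MyTarget
} // end namespace llvm
#endif // GET_REGBANK_DECLARATIONS

#ifdef GET_TARGET_REGBANK_IMPL
namespace llvm {
namespace MyTarget {
const uint32_t GPRRegBankCoverageData[] = {
  // 0-31
  (1u << (MyTarget::GPR32RegClassID - 0)) |
  (1u << (MyTarget::GPR64RegClassID - 0)) |
  0,
};
RegisterBank GPRRegBank(/* ID */ MyTarget::GPRRegBankID, /* Name */ "GPR",
                        /* Size */ 64, /* CoveredRegClasses */
                        GPRRegBankCoverageData, /* NumRegClasses */ 2);
} // end namespace MyTarget
} // end namespace llvm
#endif // GET_TARGET_REGBANK_IMPL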
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index b75be13..bebb1a1 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -93,8 +93,7 @@ void RegisterInfoEmitter::runEnums(raw_ostream &OS,
// Register enums are stored as uint16_t in the tables. Make sure we'll fit.
assert(Registers.size() <= 0xffff && "Too many regs to fit in tables");
- std::string Namespace =
- Registers.front().TheDef->getValueAsString("Namespace");
+ StringRef Namespace = Registers.front().TheDef->getValueAsString("Namespace");
emitSourceFileHeader("Target Register Enum Values", OS);
@@ -354,7 +353,7 @@ void RegisterInfoEmitter::EmitRegMappingTables(
for (unsigned i = I->second.size(), e = maxLength; i != e; ++i)
I->second.push_back(-1);
- std::string Namespace = Regs.front().TheDef->getValueAsString("Namespace");
+ StringRef Namespace = Regs.front().TheDef->getValueAsString("Namespace");
OS << "// " << Namespace << " Dwarf<->LLVM register mappings.\n";
@@ -464,7 +463,7 @@ void RegisterInfoEmitter::EmitRegMapping(
if (!maxLength)
return;
- std::string Namespace = Regs.front().TheDef->getValueAsString("Namespace");
+ StringRef Namespace = Regs.front().TheDef->getValueAsString("Namespace");
// Emit reverse information about the dwarf register numbers.
for (unsigned j = 0; j < 2; ++j) {
@@ -1023,18 +1022,14 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
<< "MCRegisterClasses[] = {\n";
for (const auto &RC : RegisterClasses) {
- // Asserts to make sure values will fit in table assuming types from
- // MCRegisterInfo.h
- assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
- assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
- assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");
-
+ assert(isInt<8>(RC.CopyCost) && "Copy cost too large.");
+ // Register size and spill size will become independent, but are not at
+ // the moment. For now use SpillSize as the register size.
OS << " { " << RC.getName() << ", " << RC.getName() << "Bits, "
<< RegClassStrings.get(RC.getName()) << ", "
<< RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
<< RC.getQualifiedName() + "RegClassID" << ", "
<< RC.SpillSize/8 << ", "
- << RC.SpillAlignment/8 << ", "
<< RC.CopyCost << ", "
<< ( RC.Allocatable ? "true" : "false" ) << " },\n";
}
@@ -1200,7 +1195,8 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "\" };\n\n";
// Emit SubRegIndex lane masks, including 0.
- OS << "\nstatic const LaneBitmask SubRegIndexLaneMaskTable[] = {\n LaneBitmask::getAll(),\n";
+ OS << "\nstatic const LaneBitmask SubRegIndexLaneMaskTable[] = {\n "
+ "LaneBitmask::getAll(),\n";
for (const auto &Idx : SubRegIndices) {
printMask(OS << " ", Idx.LaneMask);
OS << ", // " << Idx.getName() << '\n';
@@ -1239,7 +1235,8 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
BitVector MaskBV(RegisterClasses.size());
for (const auto &RC : RegisterClasses) {
- OS << "static const uint32_t " << RC.getName() << "SubClassMask[] = {\n ";
+ OS << "static const uint32_t " << RC.getName()
+ << "SubClassMask[] = {\n ";
printBitVectorAsHex(OS, RC.getSubClasses(), 32);
// Emit super-reg class masks for any relevant SubRegIndices that can
@@ -1316,9 +1313,13 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< " { // Register class instances\n";
for (const auto &RC : RegisterClasses) {
+ assert(isUInt<16>(RC.SpillSize/8) && "SpillSize too large.");
+ assert(isUInt<16>(RC.SpillAlignment/8) && "SpillAlignment too large.");
OS << " extern const TargetRegisterClass " << RC.getName()
<< "RegClass = {\n " << '&' << Target.getName()
<< "MCRegisterClasses[" << RC.getName() << "RegClassID],\n "
+ << RC.SpillSize/8 << ", /* SpillSize */\n "
+ << RC.SpillAlignment/8 << ", /* SpillAlignment */\n "
<< "VTLists + " << VTSeqs.get(RC.VTs) << ",\n " << RC.getName()
<< "SubClassMask,\n SuperRegIdxSeqs + "
<< SuperRegIdxSeqs.get(SuperRegIdxLists[RC.EnumValue]) << ",\n ";
diff --git a/contrib/llvm/utils/TableGen/SearchableTableEmitter.cpp b/contrib/llvm/utils/TableGen/SearchableTableEmitter.cpp
index 80f0b0d..f73c197 100644
--- a/contrib/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -112,8 +112,8 @@ private:
void SearchableTableEmitter::emitMappingEnum(std::vector<Record *> &Items,
Record *InstanceClass,
raw_ostream &OS) {
- std::string EnumNameField = InstanceClass->getValueAsString("EnumNameField");
- std::string EnumValueField;
+ StringRef EnumNameField = InstanceClass->getValueAsString("EnumNameField");
+ StringRef EnumValueField;
if (!InstanceClass->isValueUnset("EnumValueField"))
EnumValueField = InstanceClass->getValueAsString("EnumValueField");
@@ -230,7 +230,7 @@ void SearchableTableEmitter::emitLookupDeclaration(StringRef Name,
void SearchableTableEmitter::emitMapping(Record *InstanceClass,
raw_ostream &OS) {
- const std::string &TableName = InstanceClass->getName();
+ StringRef TableName = InstanceClass->getName();
std::vector<Record *> Items = Records.getAllDerivedDefinitions(TableName);
// Gather all the records we're going to need for this particular mapping.
@@ -265,8 +265,8 @@ void SearchableTableEmitter::emitMapping(Record *InstanceClass,
++Idx;
}
- OS << "#ifdef GET_" << StringRef(TableName).upper() << "_DECL\n";
- OS << "#undef GET_" << StringRef(TableName).upper() << "_DECL\n";
+ OS << "#ifdef GET_" << TableName.upper() << "_DECL\n";
+ OS << "#undef GET_" << TableName.upper() << "_DECL\n";
// Next emit the enum containing the top-level names for use in C++ code if
// requested
@@ -281,8 +281,8 @@ void SearchableTableEmitter::emitMapping(Record *InstanceClass,
OS << "#endif\n\n";
- OS << "#ifdef GET_" << StringRef(TableName).upper() << "_IMPL\n";
- OS << "#undef GET_" << StringRef(TableName).upper() << "_IMPL\n";
+ OS << "#ifdef GET_" << TableName.upper() << "_IMPL\n";
+ OS << "#undef GET_" << TableName.upper() << "_IMPL\n";
// The primary data table contains all the fields defined for this map.
emitPrimaryTable(TableName, FieldNames, SearchFieldNames, SearchTables, Items,
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
index bf7b392..d1d873b 100644
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -180,9 +180,9 @@ unsigned SubtargetEmitter::FeatureKeyValues(raw_ostream &OS) {
// Next feature
Record *Feature = FeatureList[i];
- const std::string &Name = Feature->getName();
- const std::string &CommandLineName = Feature->getValueAsString("Name");
- const std::string &Desc = Feature->getValueAsString("Desc");
+ StringRef Name = Feature->getName();
+ StringRef CommandLineName = Feature->getValueAsString("Name");
+ StringRef Desc = Feature->getValueAsString("Desc");
if (CommandLineName.empty()) continue;
@@ -237,7 +237,7 @@ unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
// Next processor
Record *Processor = ProcessorList[i];
- const std::string &Name = Processor->getValueAsString("Name");
+ StringRef Name = Processor->getValueAsString("Name");
const std::vector<Record*> &FeatureList =
Processor->getValueAsListOfDefs("Features");
@@ -375,7 +375,7 @@ EmitStageAndOperandCycleData(raw_ostream &OS,
if (FUs.empty())
continue;
- const std::string &Name = ProcModel.ItinsDef->getName();
+ StringRef Name = ProcModel.ItinsDef->getName();
OS << "\n// Functional units for \"" << Name << "\"\n"
<< "namespace " << Name << "FU {\n";
@@ -415,7 +415,7 @@ EmitStageAndOperandCycleData(raw_ostream &OS,
BypassTable += " 0, // No itinerary\n";
// For each Itinerary across all processors, add a unique entry to the stages,
- // operand cycles, and pipepine bypess tables. Then add the new Itinerary
+ // operand cycles, and pipeline bypass tables. Then add the new Itinerary
// object with computed offsets to the ProcItinLists result.
unsigned StageCount = 1, OperandCycleCount = 1;
std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
@@ -429,7 +429,7 @@ EmitStageAndOperandCycleData(raw_ostream &OS,
if (!ProcModel.hasItineraries())
continue;
- const std::string &Name = ProcModel.ItinsDef->getName();
+ StringRef Name = ProcModel.ItinsDef->getName();
ItinList.resize(SchedModels.numInstrSchedClasses());
assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");
@@ -546,9 +546,6 @@ EmitItineraries(raw_ostream &OS,
if (!ItinsDefSet.insert(ItinsDef).second)
continue;
- // Get processor itinerary name
- const std::string &Name = ItinsDef->getName();
-
// Get the itinerary list for the processor.
assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
@@ -562,7 +559,7 @@ EmitItineraries(raw_ostream &OS,
OS << "static const llvm::InstrItinerary ";
// Begin processor itinerary table
- OS << Name << "[] = {\n";
+ OS << ItinsDef->getName() << "[] = {\n";
// For each itinerary class in CodeGenSchedClass::Index order.
for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
@@ -805,6 +802,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
return;
std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
+ DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
DEBUG(SC.dump(&SchedModels));
@@ -917,6 +915,8 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
+ SCDesc.BeginGroup |= WriteRes->getValueAsBit("SingleIssue");
+ SCDesc.EndGroup |= WriteRes->getValueAsBit("SingleIssue");
// Create an entry for each ProcResource listed in WriteRes.
RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
@@ -1210,7 +1210,7 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
// Next processor
Record *Processor = ProcessorList[i];
- const std::string &Name = Processor->getValueAsString("Name");
+ StringRef Name = Processor->getValueAsString("Name");
const std::string &ProcModelName =
SchedModels.getModelForProc(Processor).ModelName;
@@ -1358,9 +1358,9 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
for (Record *R : Features) {
// Next record
- const std::string &Instance = R->getName();
- const std::string &Value = R->getValueAsString("Value");
- const std::string &Attribute = R->getValueAsString("Attribute");
+ StringRef Instance = R->getName();
+ StringRef Value = R->getValueAsString("Value");
+ StringRef Attribute = R->getValueAsString("Attribute");
if (Value=="true" || Value=="false")
OS << " if (Bits[" << Target << "::"
diff --git a/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.cpp b/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.cpp
index 7db8813..5153c35 100644
--- a/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.cpp
+++ b/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.cpp
@@ -16,10 +16,11 @@
using namespace llvm;
-void SubtargetFeatureInfo::dump() const {
- errs() << getEnumName() << " " << Index << "\n";
- TheDef->dump();
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void SubtargetFeatureInfo::dump() const {
+ errs() << getEnumName() << " " << Index << "\n" << *TheDef;
}
+#endif
std::vector<std::pair<Record *, SubtargetFeatureInfo>>
SubtargetFeatureInfo::getAll(const RecordKeeper &Records) {
@@ -44,8 +45,7 @@ SubtargetFeatureInfo::getAll(const RecordKeeper &Records) {
}
void SubtargetFeatureInfo::emitSubtargetFeatureFlagEnumeration(
- std::map<Record *, SubtargetFeatureInfo, LessRecordByID> &SubtargetFeatures,
- raw_ostream &OS) {
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
OS << "// Flags for subtarget features that participate in "
<< "instruction matching.\n";
OS << "enum SubtargetFeatureFlag : "
@@ -58,14 +58,39 @@ void SubtargetFeatureInfo::emitSubtargetFeatureFlagEnumeration(
OS << "};\n\n";
}
-void SubtargetFeatureInfo::emitNameTable(
- std::map<Record *, SubtargetFeatureInfo, LessRecordByID> &SubtargetFeatures,
- raw_ostream &OS) {
- OS << "static const char *SubtargetFeatureNames[] = {\n";
+void SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
+ OS << "// Bits for subtarget features that participate in "
+ << "instruction matching.\n";
+ OS << "enum SubtargetFeatureBits : "
+ << getMinimalTypeForRange(SubtargetFeatures.size()) << " {\n";
for (const auto &SF : SubtargetFeatures) {
const SubtargetFeatureInfo &SFI = SF.second;
- OS << " \"" << SFI.getEnumName() << "\",\n";
+ OS << " " << SFI.getEnumBitName() << " = " << SFI.Index << ",\n";
}
+ OS << "};\n\n";
+}
+
+void SubtargetFeatureInfo::emitNameTable(
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
+ // Need to sort the name table so that lookup by the log of the enum value
+ // gives the proper name. More specifically, for a feature of value 1<<n,
+ // SubtargetFeatureNames[n] should be the name of the feature.
+ uint64_t IndexUB = 0;
+ for (const auto &SF : SubtargetFeatures)
+ if (IndexUB <= SF.second.Index)
+ IndexUB = SF.second.Index+1;
+
+ std::vector<std::string> Names;
+ if (IndexUB > 0)
+ Names.resize(IndexUB);
+ for (const auto &SF : SubtargetFeatures)
+ Names[SF.second.Index] = SF.second.getEnumName();
+
+ OS << "static const char *SubtargetFeatureNames[] = {\n";
+ for (uint64_t I = 0; I < IndexUB; ++I)
+ OS << " \"" << Names[I] << "\",\n";
+
// A small number of targets have no predicates. Null terminate the array to
// avoid a zero-length array.
OS << " nullptr\n"
@@ -74,8 +99,27 @@ void SubtargetFeatureInfo::emitNameTable(
void SubtargetFeatureInfo::emitComputeAvailableFeatures(
StringRef TargetName, StringRef ClassName, StringRef FuncName,
- std::map<Record *, SubtargetFeatureInfo, LessRecordByID> &SubtargetFeatures,
- raw_ostream &OS) {
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS,
+ StringRef ExtraParams) {
+ OS << "PredicateBitset " << TargetName << ClassName << "::\n"
+ << FuncName << "(const " << TargetName << "Subtarget *Subtarget";
+ if (!ExtraParams.empty())
+ OS << ", " << ExtraParams;
+ OS << ") const {\n";
+ OS << " PredicateBitset Features;\n";
+ for (const auto &SF : SubtargetFeatures) {
+ const SubtargetFeatureInfo &SFI = SF.second;
+
+ OS << " if (" << SFI.TheDef->getValueAsString("CondString") << ")\n";
+ OS << " Features[" << SFI.getEnumBitName() << "] = 1;\n";
+ }
+ OS << " return Features;\n";
+ OS << "}\n\n";
+}
+
+void SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
+ StringRef TargetName, StringRef ClassName, StringRef FuncName,
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
OS << "uint64_t " << TargetName << ClassName << "::\n"
<< FuncName << "(const FeatureBitset& FB) const {\n";
OS << " uint64_t Features = 0;\n";
diff --git a/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.h b/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.h
index 99f380f..c55c16a 100644
--- a/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.h
+++ b/contrib/llvm/utils/TableGen/SubtargetFeatureInfo.h
@@ -21,6 +21,9 @@ namespace llvm {
class Record;
class RecordKeeper;
+struct SubtargetFeatureInfo;
+using SubtargetFeatureInfoMap = std::map<Record *, SubtargetFeatureInfo, LessRecordByID>;
+
/// Helper class for storing information on a subtarget feature which
/// participates in instruction matching.
struct SubtargetFeatureInfo {
@@ -37,23 +40,64 @@ struct SubtargetFeatureInfo {
return "Feature_" + TheDef->getName().str();
}
+ /// \brief The name of the enumerated constant identifying the bitnumber for
+ /// this feature.
+ std::string getEnumBitName() const {
+ return "Feature_" + TheDef->getName().str() + "Bit";
+ }
+
+ bool mustRecomputePerFunction() const {
+ return TheDef->getValueAsBit("RecomputePerFunction");
+ }
+
void dump() const;
static std::vector<std::pair<Record *, SubtargetFeatureInfo>>
getAll(const RecordKeeper &Records);
/// Emit the subtarget feature flag definitions.
+ ///
+ /// This version emits the bit value for the feature and is therefore limited
+ /// to 64 feature bits.
static void emitSubtargetFeatureFlagEnumeration(
- std::map<Record *, SubtargetFeatureInfo, LessRecordByID>
- &SubtargetFeatures,
- raw_ostream &OS);
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS);
- static void emitNameTable(std::map<Record *, SubtargetFeatureInfo,
- LessRecordByID> &SubtargetFeatures,
+ /// Emit the subtarget feature flag definitions.
+ ///
+ /// This version emits the bit index for the feature and can therefore support
+ /// more than 64 feature bits.
+ static void
+ emitSubtargetFeatureBitEnumeration(SubtargetFeatureInfoMap &SubtargetFeatures,
+ raw_ostream &OS);
+
+ static void emitNameTable(SubtargetFeatureInfoMap &SubtargetFeatures,
raw_ostream &OS);
/// Emit the function to compute the list of available features given a
/// subtarget.
///
+ /// This version is used for subtarget features defined using Predicate<>
+ /// and supports more than 64 feature bits.
+ ///
+ /// \param TargetName The name of the target as used in class prefixes (e.g.
+ /// <TargetName>Subtarget)
+ /// \param ClassName The name of the class (without the <Target> prefix)
+ /// that will contain the generated functions.
+ /// \param FuncName The name of the function to emit.
+ /// \param SubtargetFeatures A map of TableGen records to the
+ /// SubtargetFeatureInfo equivalent.
+ /// \param ExtraParams Additional arguments to the generated function.
+ static void
+ emitComputeAvailableFeatures(StringRef TargetName, StringRef ClassName,
+ StringRef FuncName,
+ SubtargetFeatureInfoMap &SubtargetFeatures,
+ raw_ostream &OS, StringRef ExtraParams = "");
+
+ /// Emit the function to compute the list of available features given a
+ /// subtarget.
+ ///
+ /// This version is used for subtarget features defined using
+ /// AssemblerPredicate<> and supports up to 64 feature bits.
+ ///
/// \param TargetName The name of the target as used in class prefixes (e.g.
/// <TargetName>Subtarget)
/// \param ClassName The name of the class (without the <Target> prefix)
@@ -61,11 +105,9 @@ struct SubtargetFeatureInfo {
/// \param FuncName The name of the function to emit.
/// \param SubtargetFeatures A map of TableGen records to the
/// SubtargetFeatureInfo equivalent.
- static void emitComputeAvailableFeatures(
+ static void emitComputeAssemblerAvailableFeatures(
StringRef TargetName, StringRef ClassName, StringRef FuncName,
- std::map<Record *, SubtargetFeatureInfo, LessRecordByID>
- &SubtargetFeatures,
- raw_ostream &OS);
+ SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS);
};
} // end namespace llvm
diff --git a/contrib/llvm/utils/TableGen/TableGen.cpp b/contrib/llvm/utils/TableGen/TableGen.cpp
index 79a7731..00d20f1 100644
--- a/contrib/llvm/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/utils/TableGen/TableGen.cpp
@@ -46,6 +46,8 @@ enum ActionType {
GenAttributes,
GenSearchableTables,
GenGlobalISel,
+ GenX86EVEX2VEXTables,
+ GenRegisterBank,
};
namespace {
@@ -94,11 +96,16 @@ namespace {
clEnumValN(GenSearchableTables, "gen-searchable-tables",
"Generate generic binary-searchable table"),
clEnumValN(GenGlobalISel, "gen-global-isel",
- "Generate GlobalISel selector")));
+ "Generate GlobalISel selector"),
+ clEnumValN(GenX86EVEX2VEXTables, "gen-x86-EVEX2VEX-tables",
+ "Generate X86 EVEX to VEX compress tables"),
+ clEnumValN(GenRegisterBank, "gen-register-bank",
+ "Generate registers bank descriptions")));
+ cl::OptionCategory PrintEnumsCat("Options for -print-enums");
cl::opt<std::string>
Class("class", cl::desc("Print Enum list for this class"),
- cl::value_desc("class name"));
+ cl::value_desc("class name"), cl::cat(PrintEnumsCat));
bool LLVMTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
switch (Action) {
@@ -183,6 +190,12 @@ bool LLVMTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenGlobalISel:
EmitGlobalISel(Records, OS);
break;
+ case GenRegisterBank:
+ EmitRegisterBank(Records, OS);
+ break;
+ case GenX86EVEX2VEXTables:
+ EmitX86EVEX2VEXTables(Records, OS);
+ break;
}
return false;
diff --git a/contrib/llvm/utils/TableGen/TableGenBackends.h b/contrib/llvm/utils/TableGen/TableGenBackends.h
index eb306d2..2512997 100644
--- a/contrib/llvm/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm/utils/TableGen/TableGenBackends.h
@@ -81,6 +81,8 @@ void EmitCTags(RecordKeeper &RK, raw_ostream &OS);
void EmitAttributes(RecordKeeper &RK, raw_ostream &OS);
void EmitSearchableTables(RecordKeeper &RK, raw_ostream &OS);
void EmitGlobalISel(RecordKeeper &RK, raw_ostream &OS);
+void EmitX86EVEX2VEXTables(RecordKeeper &RK, raw_ostream &OS);
+void EmitRegisterBank(RecordKeeper &RK, raw_ostream &OS);
} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/Types.cpp b/contrib/llvm/utils/TableGen/Types.cpp
index 3545829..04d9e40 100644
--- a/contrib/llvm/utils/TableGen/Types.cpp
+++ b/contrib/llvm/utils/TableGen/Types.cpp
@@ -40,5 +40,6 @@ const char *llvm::getMinimalTypeForEnumBitfield(uint64_t Size) {
uint64_t MaxIndex = Size;
if (MaxIndex > 0)
MaxIndex--;
+ assert(MaxIndex <= 64 && "Too many bits");
return getMinimalTypeForRange(1ULL << MaxIndex);
}
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
index 5b710e4..c80b969 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of the disassembler tables.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
@@ -879,6 +879,10 @@ void DisassemblerTables::setTableFields(ModRMDecision &decision,
newInfo.name == "XCHG64ar"))
continue; // special case for XCHG*ar and NOOP
+ if (previousInfo.name == "DATA16_PREFIX" &&
+ newInfo.name == "DATA32_PREFIX")
+ continue; // special case for data16 and data32
+
if (outranks(previousInfo.insnContext, newInfo.insnContext))
continue;
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.h b/contrib/llvm/utils/TableGen/X86DisassemblerTables.h
index 5a8688b..1171c79 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.h
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.h
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the interface of the disassembler tables.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp b/contrib/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
new file mode 100644
index 0000000..07b96b0
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
@@ -0,0 +1,339 @@
+//===- utils/TableGen/X86EVEX2VEXTablesEmitter.cpp - X86 backend-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This tablegen backend is responsible for emitting the X86 backend EVEX2VEX
+/// compression tables.
+///
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+
+namespace {
+
+class X86EVEX2VEXTablesEmitter {
+ CodeGenTarget Target;
+
+ // Hold all non-masked & non-broadcasted EVEX encoded instructions
+ std::vector<const CodeGenInstruction *> EVEXInsts;
+  // Hold all VEX encoded instructions, divided into groups with the same
+  // opcode to make the search more efficient.
+ std::map<uint64_t, std::vector<const CodeGenInstruction *>> VEXInsts;
+
+ typedef std::pair<const CodeGenInstruction *, const CodeGenInstruction *> Entry;
+
+ // Represent both compress tables
+ std::vector<Entry> EVEX2VEX128;
+ std::vector<Entry> EVEX2VEX256;
+
+ // Represents a manually added entry to the tables
+ struct ManualEntry {
+ const char *EVEXInstStr;
+ const char *VEXInstStr;
+ bool Is128Bit;
+ };
+
+public:
+ X86EVEX2VEXTablesEmitter(RecordKeeper &R) : Target(R) {}
+
+ // run - Output X86 EVEX2VEX tables.
+ void run(raw_ostream &OS);
+
+private:
+ // Prints the given table as a C++ array of type
+ // X86EvexToVexCompressTableEntry
+ void printTable(const std::vector<Entry> &Table, raw_ostream &OS);
+
+ bool inExceptionList(const CodeGenInstruction *Inst) {
+ // List of EVEX instructions that match VEX instructions by the encoding
+ // but do not perform the same operation.
+ static constexpr const char *ExceptionList[] = {
+ "VCVTQQ2PD",
+ "VCVTQQ2PS",
+ "VPMAXSQ",
+ "VPMAXUQ",
+ "VPMINSQ",
+ "VPMINUQ",
+ "VPMULLQ",
+ "VPSRAQ",
+ "VDBPSADBW",
+ "VRNDSCALE",
+ "VSCALEFPS"
+ };
+ // Instruction's name starts with one of the entries in the exception list
+ for (StringRef InstStr : ExceptionList) {
+ if (Inst->TheDef->getName().startswith(InstStr))
+ return true;
+ }
+ return false;
+ }
+
+};
+
+void X86EVEX2VEXTablesEmitter::printTable(const std::vector<Entry> &Table,
+ raw_ostream &OS) {
+ std::string Size = (Table == EVEX2VEX128) ? "128" : "256";
+
+ OS << "// X86 EVEX encoded instructions that have a VEX " << Size
+ << " encoding\n"
+ << "// (table format: <EVEX opcode, VEX-" << Size << " opcode>).\n"
+ << "static const X86EvexToVexCompressTableEntry X86EvexToVex" << Size
+ << "CompressTable[] = {\n"
+ << " // EVEX scalar with corresponding VEX.\n";
+
+ // Print all entries added to the table
+ for (auto Pair : Table) {
+ OS << " { X86::" << Pair.first->TheDef->getName()
+ << ", X86::" << Pair.second->TheDef->getName() << " },\n";
+ }
+
+  // Some VEX instructions were duplicated into multiple EVEX versions due to
+  // the introduction of mask variants, so some of the EVEX versions have a
+  // different encoding than the VEX instruction. In order to maximize the
+  // compression we add these entries manually.
+ static constexpr ManualEntry ManuallyAddedEntries[] = {
+ // EVEX-Inst VEX-Inst Is128-bit
+ {"VMOVDQU8Z128mr", "VMOVDQUmr", true},
+ {"VMOVDQU8Z128rm", "VMOVDQUrm", true},
+ {"VMOVDQU8Z128rr", "VMOVDQUrr", true},
+ {"VMOVDQU8Z128rr_REV", "VMOVDQUrr_REV", true},
+ {"VMOVDQU16Z128mr", "VMOVDQUmr", true},
+ {"VMOVDQU16Z128rm", "VMOVDQUrm", true},
+ {"VMOVDQU16Z128rr", "VMOVDQUrr", true},
+ {"VMOVDQU16Z128rr_REV", "VMOVDQUrr_REV", true},
+ {"VMOVDQU8Z256mr", "VMOVDQUYmr", false},
+ {"VMOVDQU8Z256rm", "VMOVDQUYrm", false},
+ {"VMOVDQU8Z256rr", "VMOVDQUYrr", false},
+ {"VMOVDQU8Z256rr_REV", "VMOVDQUYrr_REV", false},
+ {"VMOVDQU16Z256mr", "VMOVDQUYmr", false},
+ {"VMOVDQU16Z256rm", "VMOVDQUYrm", false},
+ {"VMOVDQU16Z256rr", "VMOVDQUYrr", false},
+ {"VMOVDQU16Z256rr_REV", "VMOVDQUYrr_REV", false},
+
+ {"VPERMILPDZ128mi", "VPERMILPDmi", true},
+ {"VPERMILPDZ128ri", "VPERMILPDri", true},
+ {"VPERMILPDZ128rm", "VPERMILPDrm", true},
+ {"VPERMILPDZ128rr", "VPERMILPDrr", true},
+ {"VPERMILPDZ256mi", "VPERMILPDYmi", false},
+ {"VPERMILPDZ256ri", "VPERMILPDYri", false},
+ {"VPERMILPDZ256rm", "VPERMILPDYrm", false},
+ {"VPERMILPDZ256rr", "VPERMILPDYrr", false},
+
+ {"VPBROADCASTQZ128m", "VPBROADCASTQrm", true},
+ {"VPBROADCASTQZ128r", "VPBROADCASTQrr", true},
+ {"VPBROADCASTQZ256m", "VPBROADCASTQYrm", false},
+ {"VPBROADCASTQZ256r", "VPBROADCASTQYrr", false},
+
+ {"VBROADCASTSDZ256m", "VBROADCASTSDYrm", false},
+ {"VBROADCASTSDZ256r", "VBROADCASTSDYrr", false},
+
+ {"VEXTRACTF64x2Z256mr", "VEXTRACTF128mr", false},
+ {"VEXTRACTF64x2Z256rr", "VEXTRACTF128rr", false},
+ {"VEXTRACTI64x2Z256mr", "VEXTRACTI128mr", false},
+ {"VEXTRACTI64x2Z256rr", "VEXTRACTI128rr", false},
+
+ {"VINSERTF64x2Z256rm", "VINSERTF128rm", false},
+ {"VINSERTF64x2Z256rr", "VINSERTF128rr", false},
+ {"VINSERTI64x2Z256rm", "VINSERTI128rm", false},
+ {"VINSERTI64x2Z256rr", "VINSERTI128rr", false}
+ };
+
+ // Print the manually added entries
+ for (const ManualEntry &Entry : ManuallyAddedEntries) {
+ if ((Table == EVEX2VEX128 && Entry.Is128Bit) ||
+ (Table == EVEX2VEX256 && !Entry.Is128Bit)) {
+ OS << " { X86::" << Entry.EVEXInstStr << ", X86::" << Entry.VEXInstStr
+ << " },\n";
+ }
+ }
+
+ OS << "};\n\n";
+}
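// For orientation, the table printed here comes out shaped roughly like the
// following in the generated file (the first entry stands in for an
// automatically discovered pair, the second comes from the manual list above;
// the exact contents depend on the instruction records):
static const X86EvexToVexCompressTableEntry X86EvexToVex128CompressTable[] = {
  // EVEX scalar with corresponding VEX.
  { X86::VADDPDZ128rr, X86::VADDPDrr },
  { X86::VMOVDQU8Z128rr, X86::VMOVDQUrr },
};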
+
+// Return true if the 2 BitsInits are equal
+static inline bool equalBitsInits(const BitsInit *B1, const BitsInit *B2) {
+ if (B1->getNumBits() != B2->getNumBits())
+ PrintFatalError("Comparing two BitsInits with different sizes!");
+
+ for (unsigned i = 0, e = B1->getNumBits(); i != e; ++i) {
+ if (BitInit *Bit1 = dyn_cast<BitInit>(B1->getBit(i))) {
+ if (BitInit *Bit2 = dyn_cast<BitInit>(B2->getBit(i))) {
+ if (Bit1->getValue() != Bit2->getValue())
+ return false;
+ } else
+ PrintFatalError("Invalid BitsInit bit");
+ } else
+ PrintFatalError("Invalid BitsInit bit");
+ }
+ return true;
+}
+
+// Calculates the integer value residing in a BitsInit object
+static inline uint64_t getValueFromBitsInit(const BitsInit *B) {
+ uint64_t Value = 0;
+ for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) {
+ if (BitInit *Bit = dyn_cast<BitInit>(B->getBit(i)))
+ Value |= uint64_t(Bit->getValue()) << i;
+ else
+ PrintFatalError("Invalid VectSize bit");
+ }
+ return Value;
+}
+
+// Function object - Operator() returns true if the given VEX instruction
+// matches the EVEX instruction of this object.
+class IsMatch {
+ const CodeGenInstruction *Inst;
+
+public:
+ IsMatch(const CodeGenInstruction *Inst) : Inst(Inst) {}
+
+ bool operator()(const CodeGenInstruction *Inst2) {
+ Record *Rec1 = Inst->TheDef;
+ Record *Rec2 = Inst2->TheDef;
+ uint64_t Rec1WVEX =
+ getValueFromBitsInit(Rec1->getValueAsBitsInit("VEX_WPrefix"));
+ uint64_t Rec2WVEX =
+ getValueFromBitsInit(Rec2->getValueAsBitsInit("VEX_WPrefix"));
+
+ if (Rec2->getValueAsDef("OpEnc")->getName().str() != "EncVEX" ||
+ // VEX/EVEX fields
+ Rec2->getValueAsDef("OpPrefix") != Rec1->getValueAsDef("OpPrefix") ||
+ Rec2->getValueAsDef("OpMap") != Rec1->getValueAsDef("OpMap") ||
+ Rec2->getValueAsBit("hasVEX_4V") != Rec1->getValueAsBit("hasVEX_4V") ||
+ !equalBitsInits(Rec2->getValueAsBitsInit("EVEX_LL"),
+ Rec1->getValueAsBitsInit("EVEX_LL")) ||
+ (Rec1WVEX != 2 && Rec2WVEX != 2 && Rec1WVEX != Rec2WVEX) ||
+ // Instruction's format
+ Rec2->getValueAsDef("Form") != Rec1->getValueAsDef("Form") ||
+ Rec2->getValueAsBit("isAsmParserOnly") !=
+ Rec1->getValueAsBit("isAsmParserOnly"))
+ return false;
+
+    // This is needed for instructions with an intrinsic version (_Int),
+    // where the only difference is the size of the operands.
+    // For example: VUCOMISDZrm and Int_VUCOMISDrm.
+    // It is also needed for instructions whose EVEX version was upgraded to
+    // work with k-registers. For example, VPCMPEQBrm (xmm output register)
+    // and VPCMPEQBZ128rm (k-register output register).
+ for (unsigned i = 0; i < Inst->Operands.size(); i++) {
+ Record *OpRec1 = Inst->Operands[i].Rec;
+ Record *OpRec2 = Inst2->Operands[i].Rec;
+
+ if (OpRec1 == OpRec2)
+ continue;
+
+ if (isRegisterOperand(OpRec1) && isRegisterOperand(OpRec2)) {
+ if (getRegOperandSize(OpRec1) != getRegOperandSize(OpRec2))
+ return false;
+ } else if (isMemoryOperand(OpRec1) && isMemoryOperand(OpRec2)) {
+ return false;
+ } else if (isImmediateOperand(OpRec1) && isImmediateOperand(OpRec2)) {
+ if (OpRec1->getValueAsDef("Type") != OpRec2->getValueAsDef("Type"))
+ return false;
+ } else
+ return false;
+ }
+
+ return true;
+ }
+
+private:
+ static inline bool isRegisterOperand(const Record *Rec) {
+ return Rec->isSubClassOf("RegisterClass") ||
+ Rec->isSubClassOf("RegisterOperand");
+ }
+
+ static inline bool isMemoryOperand(const Record *Rec) {
+ return Rec->isSubClassOf("Operand") &&
+ Rec->getValueAsString("OperandType") == "OPERAND_MEMORY";
+ }
+
+ static inline bool isImmediateOperand(const Record *Rec) {
+ return Rec->isSubClassOf("Operand") &&
+ Rec->getValueAsString("OperandType") == "OPERAND_IMMEDIATE";
+ }
+
+ static inline unsigned int getRegOperandSize(const Record *RegRec) {
+ if (RegRec->isSubClassOf("RegisterClass"))
+ return RegRec->getValueAsInt("Alignment");
+ if (RegRec->isSubClassOf("RegisterOperand"))
+ return RegRec->getValueAsDef("RegClass")->getValueAsInt("Alignment");
+
+ llvm_unreachable("Register operand's size not known!");
+ }
+};
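
In the VEX.W comparison above, the value 2 (named VEX_WIG in the
X86RecognizableInstr.h changes further down) means "W is ignored", so two
encodings are compatible when either side ignores W or both request the same
explicit W value. A small illustrative helper capturing that rule; the names
here are illustrative, not the emitter's:

// Illustrative only: mirrors the (Rec1WVEX != 2 && Rec2WVEX != 2 &&
// Rec1WVEX != Rec2WVEX) mismatch test in IsMatch::operator() above.
enum WPrefixSketch { W0 = 0, W1 = 1, WIG = 2 };

static bool wPrefixesCompatible(WPrefixSketch A, WPrefixSketch B) {
  return A == WIG || B == WIG || A == B;
}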
+
+void X86EVEX2VEXTablesEmitter::run(raw_ostream &OS) {
+ emitSourceFileHeader("X86 EVEX2VEX tables", OS);
+
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions =
+ Target.getInstructionsByEnumValue();
+
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ // Filter non-X86 instructions.
+ if (!Inst->TheDef->isSubClassOf("X86Inst"))
+ continue;
+
+    // Add VEX encoded instructions to one of the VEXInsts vectors according
+    // to its opcode.
+ if (Inst->TheDef->getValueAsDef("OpEnc")->getName() == "EncVEX") {
+ uint64_t Opcode = getValueFromBitsInit(Inst->TheDef->
+ getValueAsBitsInit("Opcode"));
+ VEXInsts[Opcode].push_back(Inst);
+ }
+ // Add relevant EVEX encoded instructions to EVEXInsts
+ else if (Inst->TheDef->getValueAsDef("OpEnc")->getName() == "EncEVEX" &&
+ !Inst->TheDef->getValueAsBit("hasEVEX_K") &&
+ !Inst->TheDef->getValueAsBit("hasEVEX_B") &&
+ getValueFromBitsInit(Inst->TheDef->
+ getValueAsBitsInit("EVEX_LL")) != 2 &&
+ !inExceptionList(Inst))
+ EVEXInsts.push_back(Inst);
+ }
+
+ for (const CodeGenInstruction *EVEXInst : EVEXInsts) {
+ uint64_t Opcode = getValueFromBitsInit(EVEXInst->TheDef->
+ getValueAsBitsInit("Opcode"));
+ // For each EVEX instruction look for a VEX match in the appropriate vector
+ // (instructions with the same opcode) using function object IsMatch.
+ auto Match = llvm::find_if(VEXInsts[Opcode], IsMatch(EVEXInst));
+ if (Match != VEXInsts[Opcode].end()) {
+ const CodeGenInstruction *VEXInst = *Match;
+
+      // If a match is found, add a new entry to the appropriate table.
+ switch (getValueFromBitsInit(
+ EVEXInst->TheDef->getValueAsBitsInit("EVEX_LL"))) {
+ case 0:
+ EVEX2VEX128.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,0}
+ break;
+ case 1:
+ EVEX2VEX256.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,1}
+ break;
+ default:
+ llvm_unreachable("Instruction's size not fit for the mapping!");
+ }
+ }
+ }
+
+ // Print both tables
+ printTable(EVEX2VEX128, OS);
+ printTable(EVEX2VEX256, OS);
+}
+}
+
+namespace llvm {
+void EmitX86EVEX2VEXTables(RecordKeeper &RK, raw_ostream &OS) {
+ X86EVEX2VEXTablesEmitter(RK).run(OS);
+}
+}
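
For context, each emitted row pairs an EVEX opcode with the VEX opcode it can
be compressed to, so a consumer only needs a lookup keyed on the EVEX opcode.
A minimal sketch of such a consumer follows; the struct, array, and function
names are assumptions for illustration, not the identifiers the generated file
actually uses:

// Assumed names for illustration; the real identifiers come from the table
// header that printTable() writes before the "{ X86::..., X86::... }" rows.
struct EvexToVexEntrySketch {
  unsigned EvexOpcode;
  unsigned VexOpcode;
};

static const EvexToVexEntrySketch EvexToVex128Sketch[] = {
    // Real rows look like { X86::VMOVDQU8Z128rr, X86::VMOVDQUrr }.
    {0, 0}, // placeholder entry
};

// Linear scan for a VEX replacement; a real pass could keep the table sorted
// by EVEX opcode and use binary search instead.
static int lookupVexOpcode(unsigned EvexOpcode) {
  for (const EvexToVexEntrySketch &E : EvexToVex128Sketch)
    if (E.EvexOpcode == EvexOpcode)
      return (int)E.VexOpcode;
  return -1; // no VEX form with the same semantics
}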
diff --git a/contrib/llvm/utils/TableGen/X86ModRMFilters.h b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
index d919c58..73d5602 100644
--- a/contrib/llvm/utils/TableGen/X86ModRMFilters.h
+++ b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
@@ -11,7 +11,7 @@
// It contains ModR/M filters that determine which values of the ModR/M byte
// are valid for a particular instruction.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 4736c4e..202a71a 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
@@ -21,125 +21,6 @@
#include <string>
using namespace llvm;
-
-#define MRM_MAPPING \
- MAP(C0, 64) \
- MAP(C1, 65) \
- MAP(C2, 66) \
- MAP(C3, 67) \
- MAP(C4, 68) \
- MAP(C5, 69) \
- MAP(C6, 70) \
- MAP(C7, 71) \
- MAP(C8, 72) \
- MAP(C9, 73) \
- MAP(CA, 74) \
- MAP(CB, 75) \
- MAP(CC, 76) \
- MAP(CD, 77) \
- MAP(CE, 78) \
- MAP(CF, 79) \
- MAP(D0, 80) \
- MAP(D1, 81) \
- MAP(D2, 82) \
- MAP(D3, 83) \
- MAP(D4, 84) \
- MAP(D5, 85) \
- MAP(D6, 86) \
- MAP(D7, 87) \
- MAP(D8, 88) \
- MAP(D9, 89) \
- MAP(DA, 90) \
- MAP(DB, 91) \
- MAP(DC, 92) \
- MAP(DD, 93) \
- MAP(DE, 94) \
- MAP(DF, 95) \
- MAP(E0, 96) \
- MAP(E1, 97) \
- MAP(E2, 98) \
- MAP(E3, 99) \
- MAP(E4, 100) \
- MAP(E5, 101) \
- MAP(E6, 102) \
- MAP(E7, 103) \
- MAP(E8, 104) \
- MAP(E9, 105) \
- MAP(EA, 106) \
- MAP(EB, 107) \
- MAP(EC, 108) \
- MAP(ED, 109) \
- MAP(EE, 110) \
- MAP(EF, 111) \
- MAP(F0, 112) \
- MAP(F1, 113) \
- MAP(F2, 114) \
- MAP(F3, 115) \
- MAP(F4, 116) \
- MAP(F5, 117) \
- MAP(F6, 118) \
- MAP(F7, 119) \
- MAP(F8, 120) \
- MAP(F9, 121) \
- MAP(FA, 122) \
- MAP(FB, 123) \
- MAP(FC, 124) \
- MAP(FD, 125) \
- MAP(FE, 126) \
- MAP(FF, 127)
-
-// A clone of X86 since we can't depend on something that is generated.
-namespace X86Local {
- enum {
- Pseudo = 0,
- RawFrm = 1,
- AddRegFrm = 2,
- RawFrmMemOffs = 3,
- RawFrmSrc = 4,
- RawFrmDst = 5,
- RawFrmDstSrc = 6,
- RawFrmImm8 = 7,
- RawFrmImm16 = 8,
- MRMDestMem = 32,
- MRMSrcMem = 33,
- MRMSrcMem4VOp3 = 34,
- MRMSrcMemOp4 = 35,
- MRMXm = 39,
- MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
- MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
- MRMDestReg = 48,
- MRMSrcReg = 49,
- MRMSrcReg4VOp3 = 50,
- MRMSrcRegOp4 = 51,
- MRMXr = 55,
- MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
- MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,
-#define MAP(from, to) MRM_##from = to,
- MRM_MAPPING
-#undef MAP
- };
-
- enum {
- OB = 0, TB = 1, T8 = 2, TA = 3, XOP8 = 4, XOP9 = 5, XOPA = 6
- };
-
- enum {
- PS = 1, PD = 2, XS = 3, XD = 4
- };
-
- enum {
- VEX = 1, XOP = 2, EVEX = 3
- };
-
- enum {
- OpSize16 = 1, OpSize32 = 2
- };
-
- enum {
- AdSize16 = 1, AdSize32 = 2, AdSize64 = 3
- };
-}
-
using namespace X86Disassembler;
/// byteFromBitsInit - Extracts a value at most 8 bits in width from a BitsInit.
@@ -203,7 +84,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
AdSize = byteFromRec(Rec, "AdSizeBits");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
- HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix");
+  VEX_WPrefix = byteFromRec(Rec, "VEX_WPrefix");
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2");
HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
@@ -280,7 +161,7 @@ InstructionContext RecognizableInstr::insnContext() const {
llvm_unreachable("Don't support VEX.L if EVEX_L2 is enabled");
}
// VEX_L & VEX_W
- if (HasVEX_LPrefix && HasVEX_WPrefix) {
+ if (HasVEX_LPrefix && VEX_WPrefix == X86Local::VEX_W1) {
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
@@ -308,7 +189,7 @@ InstructionContext RecognizableInstr::insnContext() const {
llvm_unreachable("Invalid prefix");
}
}
- else if (HasEVEX_L2Prefix && HasVEX_WPrefix) {
+ else if (HasEVEX_L2Prefix && VEX_WPrefix == X86Local::VEX_W1) {
// EVEX_L2 & VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_W_OPSIZE);
@@ -337,7 +218,7 @@ InstructionContext RecognizableInstr::insnContext() const {
llvm_unreachable("Invalid prefix");
}
}
- else if (HasVEX_WPrefix) {
+ else if (VEX_WPrefix == X86Local::VEX_W1) {
// VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_W_OPSIZE);
@@ -363,7 +244,7 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = EVEX_KB(IC_EVEX);
/// eof EVEX
} else if (Encoding == X86Local::VEX || Encoding == X86Local::XOP) {
- if (HasVEX_LPrefix && HasVEX_WPrefix) {
+ if (HasVEX_LPrefix && VEX_WPrefix == X86Local::VEX_W1) {
if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_L_W_OPSIZE;
else if (OpPrefix == X86Local::XS)
@@ -378,7 +259,7 @@ InstructionContext RecognizableInstr::insnContext() const {
}
} else if (OpPrefix == X86Local::PD && HasVEX_LPrefix)
insnContext = IC_VEX_L_OPSIZE;
- else if (OpPrefix == X86Local::PD && HasVEX_WPrefix)
+ else if (OpPrefix == X86Local::PD && VEX_WPrefix == X86Local::VEX_W1)
insnContext = IC_VEX_W_OPSIZE;
else if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_OPSIZE;
@@ -386,11 +267,11 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = IC_VEX_L_XS;
else if (HasVEX_LPrefix && OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_XD;
- else if (HasVEX_WPrefix && OpPrefix == X86Local::XS)
+ else if (VEX_WPrefix == X86Local::VEX_W1 && OpPrefix == X86Local::XS)
insnContext = IC_VEX_W_XS;
- else if (HasVEX_WPrefix && OpPrefix == X86Local::XD)
+ else if (VEX_WPrefix == X86Local::VEX_W1 && OpPrefix == X86Local::XD)
insnContext = IC_VEX_W_XD;
- else if (HasVEX_WPrefix && OpPrefix == X86Local::PS)
+ else if (VEX_WPrefix == X86Local::VEX_W1 && OpPrefix == X86Local::PS)
insnContext = IC_VEX_W;
else if (HasVEX_LPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_L;
@@ -457,10 +338,12 @@ void RecognizableInstr::adjustOperandEncoding(OperandEncoding &encoding) {
// The scaling factor for AVX512 compressed displacement encoding is an
// instruction attribute. Adjust the ModRM encoding type to include the
// scale for compressed displacement.
- if (encoding != ENCODING_RM || CD8_Scale == 0)
+  if ((encoding != ENCODING_RM && encoding != ENCODING_VSIB) || CD8_Scale == 0)
return;
encoding = (OperandEncoding)(encoding + Log2_32(CD8_Scale));
- assert(encoding <= ENCODING_RM_CD64 && "Invalid CDisp scaling");
+ assert(((encoding >= ENCODING_RM && encoding <= ENCODING_RM_CD64) ||
+ (encoding >= ENCODING_VSIB && encoding <= ENCODING_VSIB_CD64)) &&
+ "Invalid CDisp scaling");
}
void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
@@ -484,7 +367,7 @@ void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
++operandIndex;
}
- const std::string &typeName = (*Operands)[operandIndex].Rec->getName();
+ StringRef typeName = (*Operands)[operandIndex].Rec->getName();
OperandEncoding encoding = encodingFromString(typeName, OpSize);
// Adjust the encoding type for an operand based on the instruction.
@@ -662,7 +545,7 @@ void RecognizableInstr::emitInstructionSpecifier() {
break;
case X86Local::MRMSrcReg4VOp3:
assert(numPhysicalOperands == 3 &&
- "Unexpected number of operands for MRMSrcRegFrm");
+ "Unexpected number of operands for MRMSrcReg4VOp3Frm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(rmRegister)
HANDLE_OPERAND(vvvvRegister)
@@ -702,7 +585,7 @@ void RecognizableInstr::emitInstructionSpecifier() {
break;
case X86Local::MRMSrcMem4VOp3:
assert(numPhysicalOperands == 3 &&
- "Unexpected number of operands for MRMSrcMemFrm");
+ "Unexpected number of operands for MRMSrcMem4VOp3Frm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(memory)
HANDLE_OPERAND(vvvvRegister)
@@ -884,7 +767,7 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
case X86Local::MRM6m: case X86Local::MRM7m:
filter = new ExtendedFilter(false, Form - X86Local::MRM0m);
break;
- MRM_MAPPING
+ X86_INSTR_MRM_MAPPING
filter = new ExactFilter(0xC0 + Form - X86Local::MRM_C0); \
break;
} // switch (Form)
@@ -944,121 +827,121 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
// For OpSize16 instructions, a declared 16-bit register or
// immediate encoding is special.
TYPE("GR16", TYPE_Rv)
- TYPE("i16imm", TYPE_IMMv)
} else if(OpSize == X86Local::OpSize32) {
// For OpSize32 instructions, a declared 32-bit register or
// immediate encoding is special.
TYPE("GR32", TYPE_Rv)
}
- TYPE("i16mem", TYPE_Mv)
- TYPE("i16imm", TYPE_IMM16)
- TYPE("i16i8imm", TYPE_IMMv)
+ TYPE("i16mem", TYPE_M)
+ TYPE("i16imm", TYPE_IMM)
+ TYPE("i16i8imm", TYPE_IMM)
TYPE("GR16", TYPE_R16)
- TYPE("i32mem", TYPE_Mv)
- TYPE("i32imm", TYPE_IMMv)
- TYPE("i32i8imm", TYPE_IMM32)
+ TYPE("i32mem", TYPE_M)
+ TYPE("i32imm", TYPE_IMM)
+ TYPE("i32i8imm", TYPE_IMM)
TYPE("GR32", TYPE_R32)
TYPE("GR32orGR64", TYPE_R32)
- TYPE("i64mem", TYPE_Mv)
- TYPE("i64i32imm", TYPE_IMM64)
- TYPE("i64i8imm", TYPE_IMM64)
+ TYPE("i64mem", TYPE_M)
+ TYPE("i64i32imm", TYPE_IMM)
+ TYPE("i64i8imm", TYPE_IMM)
TYPE("GR64", TYPE_R64)
- TYPE("i8mem", TYPE_M8)
- TYPE("i8imm", TYPE_IMM8)
+ TYPE("i8mem", TYPE_M)
+ TYPE("i8imm", TYPE_IMM)
TYPE("u8imm", TYPE_UIMM8)
TYPE("i32u8imm", TYPE_UIMM8)
TYPE("GR8", TYPE_R8)
- TYPE("VR128", TYPE_XMM128)
- TYPE("VR128X", TYPE_XMM128)
- TYPE("f128mem", TYPE_M128)
- TYPE("f256mem", TYPE_M256)
- TYPE("f512mem", TYPE_M512)
- TYPE("FR128", TYPE_XMM128)
- TYPE("FR64", TYPE_XMM64)
- TYPE("FR64X", TYPE_XMM64)
- TYPE("f64mem", TYPE_M64FP)
- TYPE("sdmem", TYPE_M64FP)
- TYPE("FR32", TYPE_XMM32)
- TYPE("FR32X", TYPE_XMM32)
- TYPE("f32mem", TYPE_M32FP)
- TYPE("ssmem", TYPE_M32FP)
+ TYPE("VR128", TYPE_XMM)
+ TYPE("VR128X", TYPE_XMM)
+ TYPE("f128mem", TYPE_M)
+ TYPE("f256mem", TYPE_M)
+ TYPE("f512mem", TYPE_M)
+ TYPE("FR128", TYPE_XMM)
+ TYPE("FR64", TYPE_XMM)
+ TYPE("FR64X", TYPE_XMM)
+ TYPE("f64mem", TYPE_M)
+ TYPE("sdmem", TYPE_M)
+ TYPE("FR32", TYPE_XMM)
+ TYPE("FR32X", TYPE_XMM)
+ TYPE("f32mem", TYPE_M)
+ TYPE("ssmem", TYPE_M)
TYPE("RST", TYPE_ST)
- TYPE("i128mem", TYPE_M128)
- TYPE("i256mem", TYPE_M256)
- TYPE("i512mem", TYPE_M512)
- TYPE("i64i32imm_pcrel", TYPE_REL64)
- TYPE("i16imm_pcrel", TYPE_REL16)
- TYPE("i32imm_pcrel", TYPE_REL32)
+ TYPE("i128mem", TYPE_M)
+ TYPE("i256mem", TYPE_M)
+ TYPE("i512mem", TYPE_M)
+ TYPE("i64i32imm_pcrel", TYPE_REL)
+ TYPE("i16imm_pcrel", TYPE_REL)
+ TYPE("i32imm_pcrel", TYPE_REL)
TYPE("SSECC", TYPE_IMM3)
TYPE("XOPCC", TYPE_IMM3)
TYPE("AVXCC", TYPE_IMM5)
TYPE("AVX512ICC", TYPE_AVX512ICC)
- TYPE("AVX512RC", TYPE_IMM32)
- TYPE("brtarget32", TYPE_RELv)
- TYPE("brtarget16", TYPE_RELv)
- TYPE("brtarget8", TYPE_REL8)
- TYPE("f80mem", TYPE_M80FP)
- TYPE("lea64_32mem", TYPE_LEA)
- TYPE("lea64mem", TYPE_LEA)
+ TYPE("AVX512RC", TYPE_IMM)
+ TYPE("brtarget32", TYPE_REL)
+ TYPE("brtarget16", TYPE_REL)
+ TYPE("brtarget8", TYPE_REL)
+ TYPE("f80mem", TYPE_M)
+ TYPE("lea64_32mem", TYPE_M)
+ TYPE("lea64mem", TYPE_M)
TYPE("VR64", TYPE_MM64)
- TYPE("i64imm", TYPE_IMMv)
+ TYPE("i64imm", TYPE_IMM)
TYPE("anymem", TYPE_M)
- TYPE("opaque32mem", TYPE_M1616)
- TYPE("opaque48mem", TYPE_M1632)
- TYPE("opaque80mem", TYPE_M1664)
- TYPE("opaque512mem", TYPE_M512)
+ TYPE("opaque32mem", TYPE_M)
+ TYPE("opaque48mem", TYPE_M)
+ TYPE("opaque80mem", TYPE_M)
+ TYPE("opaque512mem", TYPE_M)
TYPE("SEGMENT_REG", TYPE_SEGMENTREG)
TYPE("DEBUG_REG", TYPE_DEBUGREG)
TYPE("CONTROL_REG", TYPE_CONTROLREG)
- TYPE("srcidx8", TYPE_SRCIDX8)
- TYPE("srcidx16", TYPE_SRCIDX16)
- TYPE("srcidx32", TYPE_SRCIDX32)
- TYPE("srcidx64", TYPE_SRCIDX64)
- TYPE("dstidx8", TYPE_DSTIDX8)
- TYPE("dstidx16", TYPE_DSTIDX16)
- TYPE("dstidx32", TYPE_DSTIDX32)
- TYPE("dstidx64", TYPE_DSTIDX64)
- TYPE("offset16_8", TYPE_MOFFS8)
- TYPE("offset16_16", TYPE_MOFFS16)
- TYPE("offset16_32", TYPE_MOFFS32)
- TYPE("offset32_8", TYPE_MOFFS8)
- TYPE("offset32_16", TYPE_MOFFS16)
- TYPE("offset32_32", TYPE_MOFFS32)
- TYPE("offset32_64", TYPE_MOFFS64)
- TYPE("offset64_8", TYPE_MOFFS8)
- TYPE("offset64_16", TYPE_MOFFS16)
- TYPE("offset64_32", TYPE_MOFFS32)
- TYPE("offset64_64", TYPE_MOFFS64)
- TYPE("VR256", TYPE_XMM256)
- TYPE("VR256X", TYPE_XMM256)
- TYPE("VR512", TYPE_XMM512)
- TYPE("VK1", TYPE_VK1)
- TYPE("VK1WM", TYPE_VK1)
- TYPE("VK2", TYPE_VK2)
- TYPE("VK2WM", TYPE_VK2)
- TYPE("VK4", TYPE_VK4)
- TYPE("VK4WM", TYPE_VK4)
- TYPE("VK8", TYPE_VK8)
- TYPE("VK8WM", TYPE_VK8)
- TYPE("VK16", TYPE_VK16)
- TYPE("VK16WM", TYPE_VK16)
- TYPE("VK32", TYPE_VK32)
- TYPE("VK32WM", TYPE_VK32)
- TYPE("VK64", TYPE_VK64)
- TYPE("VK64WM", TYPE_VK64)
+ TYPE("srcidx8", TYPE_SRCIDX)
+ TYPE("srcidx16", TYPE_SRCIDX)
+ TYPE("srcidx32", TYPE_SRCIDX)
+ TYPE("srcidx64", TYPE_SRCIDX)
+ TYPE("dstidx8", TYPE_DSTIDX)
+ TYPE("dstidx16", TYPE_DSTIDX)
+ TYPE("dstidx32", TYPE_DSTIDX)
+ TYPE("dstidx64", TYPE_DSTIDX)
+ TYPE("offset16_8", TYPE_MOFFS)
+ TYPE("offset16_16", TYPE_MOFFS)
+ TYPE("offset16_32", TYPE_MOFFS)
+ TYPE("offset32_8", TYPE_MOFFS)
+ TYPE("offset32_16", TYPE_MOFFS)
+ TYPE("offset32_32", TYPE_MOFFS)
+ TYPE("offset32_64", TYPE_MOFFS)
+ TYPE("offset64_8", TYPE_MOFFS)
+ TYPE("offset64_16", TYPE_MOFFS)
+ TYPE("offset64_32", TYPE_MOFFS)
+ TYPE("offset64_64", TYPE_MOFFS)
+ TYPE("VR256", TYPE_YMM)
+ TYPE("VR256X", TYPE_YMM)
+ TYPE("VR512", TYPE_ZMM)
+ TYPE("VK1", TYPE_VK)
+ TYPE("VK1WM", TYPE_VK)
+ TYPE("VK2", TYPE_VK)
+ TYPE("VK2WM", TYPE_VK)
+ TYPE("VK4", TYPE_VK)
+ TYPE("VK4WM", TYPE_VK)
+ TYPE("VK8", TYPE_VK)
+ TYPE("VK8WM", TYPE_VK)
+ TYPE("VK16", TYPE_VK)
+ TYPE("VK16WM", TYPE_VK)
+ TYPE("VK32", TYPE_VK)
+ TYPE("VK32WM", TYPE_VK)
+ TYPE("VK64", TYPE_VK)
+ TYPE("VK64WM", TYPE_VK)
TYPE("GR32_NOAX", TYPE_Rv)
- TYPE("vx64mem", TYPE_M64)
- TYPE("vx128mem", TYPE_M128)
- TYPE("vx256mem", TYPE_M256)
- TYPE("vy128mem", TYPE_M128)
- TYPE("vy256mem", TYPE_M256)
- TYPE("vx64xmem", TYPE_M64)
- TYPE("vx128xmem", TYPE_M128)
- TYPE("vx256xmem", TYPE_M256)
- TYPE("vy128xmem", TYPE_M128)
- TYPE("vy256xmem", TYPE_M256)
- TYPE("vy512mem", TYPE_M512)
- TYPE("vz512mem", TYPE_M512)
+ TYPE("vx64mem", TYPE_M)
+ TYPE("vx128mem", TYPE_M)
+ TYPE("vx256mem", TYPE_M)
+ TYPE("vy128mem", TYPE_M)
+ TYPE("vy256mem", TYPE_M)
+ TYPE("vx64xmem", TYPE_M)
+ TYPE("vx128xmem", TYPE_M)
+ TYPE("vx256xmem", TYPE_M)
+ TYPE("vy128xmem", TYPE_M)
+ TYPE("vy256xmem", TYPE_M)
+ TYPE("vy512mem", TYPE_M)
+ TYPE("vz256xmem", TYPE_M)
+ TYPE("vz512mem", TYPE_M)
TYPE("BNDR", TYPE_BNDR)
errs() << "Unhandled type string " << s << "\n";
llvm_unreachable("Unhandled type string");
@@ -1242,18 +1125,19 @@ RecognizableInstr::memoryEncodingFromString(const std::string &s,
ENCODING("opaque48mem", ENCODING_RM)
ENCODING("opaque80mem", ENCODING_RM)
ENCODING("opaque512mem", ENCODING_RM)
- ENCODING("vx64mem", ENCODING_RM)
- ENCODING("vx128mem", ENCODING_RM)
- ENCODING("vx256mem", ENCODING_RM)
- ENCODING("vy128mem", ENCODING_RM)
- ENCODING("vy256mem", ENCODING_RM)
- ENCODING("vx64xmem", ENCODING_RM)
- ENCODING("vx128xmem", ENCODING_RM)
- ENCODING("vx256xmem", ENCODING_RM)
- ENCODING("vy128xmem", ENCODING_RM)
- ENCODING("vy256xmem", ENCODING_RM)
- ENCODING("vy512mem", ENCODING_RM)
- ENCODING("vz512mem", ENCODING_RM)
+ ENCODING("vx64mem", ENCODING_VSIB)
+ ENCODING("vx128mem", ENCODING_VSIB)
+ ENCODING("vx256mem", ENCODING_VSIB)
+ ENCODING("vy128mem", ENCODING_VSIB)
+ ENCODING("vy256mem", ENCODING_VSIB)
+ ENCODING("vx64xmem", ENCODING_VSIB)
+ ENCODING("vx128xmem", ENCODING_VSIB)
+ ENCODING("vx256xmem", ENCODING_VSIB)
+ ENCODING("vy128xmem", ENCODING_VSIB)
+ ENCODING("vy256xmem", ENCODING_VSIB)
+ ENCODING("vy512mem", ENCODING_VSIB)
+ ENCODING("vz256xmem", ENCODING_VSIB)
+ ENCODING("vz512mem", ENCODING_VSIB)
errs() << "Unhandled memory encoding " << s << "\n";
llvm_unreachable("Unhandled memory encoding");
}
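
The TYPE(...) and ENCODING(...) lines above come from helper macros that turn
an operand's TableGen type name into a disassembler enum, falling through to an
"unhandled" error when no name matches. A compact stand-alone sketch of that
pattern, with macro and enum names that are illustrative rather than the
emitter's own:

#include <cstdlib>
#include <iostream>
#include <string>

enum EncodingSketch { ENC_RM, ENC_VSIB };

// Each line mirrors one ENCODING("name", ENCODING_X) entry above: if the
// operand's TableGen type name matches, return the corresponding encoding.
#define ENCODING_CASE(str, enc) if (Name == (str)) return (enc);

static EncodingSketch encodingFromName(const std::string &Name) {
  ENCODING_CASE("i128mem", ENC_RM)    // ordinary ModRM memory operand
  ENCODING_CASE("vx128mem", ENC_VSIB) // gather/scatter index memory operand
  std::cerr << "Unhandled memory encoding " << Name << "\n";
  std::abort();
}
#undef ENCODING_CASE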
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
index 2e61158..ea99935 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -10,7 +10,7 @@
// This file is part of the X86 Disassembler Emitter.
// It contains the interface of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
-// X86DisasemblerEmitter.h.
+// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
@@ -24,6 +24,128 @@
namespace llvm {
+#define X86_INSTR_MRM_MAPPING \
+ MAP(C0, 64) \
+ MAP(C1, 65) \
+ MAP(C2, 66) \
+ MAP(C3, 67) \
+ MAP(C4, 68) \
+ MAP(C5, 69) \
+ MAP(C6, 70) \
+ MAP(C7, 71) \
+ MAP(C8, 72) \
+ MAP(C9, 73) \
+ MAP(CA, 74) \
+ MAP(CB, 75) \
+ MAP(CC, 76) \
+ MAP(CD, 77) \
+ MAP(CE, 78) \
+ MAP(CF, 79) \
+ MAP(D0, 80) \
+ MAP(D1, 81) \
+ MAP(D2, 82) \
+ MAP(D3, 83) \
+ MAP(D4, 84) \
+ MAP(D5, 85) \
+ MAP(D6, 86) \
+ MAP(D7, 87) \
+ MAP(D8, 88) \
+ MAP(D9, 89) \
+ MAP(DA, 90) \
+ MAP(DB, 91) \
+ MAP(DC, 92) \
+ MAP(DD, 93) \
+ MAP(DE, 94) \
+ MAP(DF, 95) \
+ MAP(E0, 96) \
+ MAP(E1, 97) \
+ MAP(E2, 98) \
+ MAP(E3, 99) \
+ MAP(E4, 100) \
+ MAP(E5, 101) \
+ MAP(E6, 102) \
+ MAP(E7, 103) \
+ MAP(E8, 104) \
+ MAP(E9, 105) \
+ MAP(EA, 106) \
+ MAP(EB, 107) \
+ MAP(EC, 108) \
+ MAP(ED, 109) \
+ MAP(EE, 110) \
+ MAP(EF, 111) \
+ MAP(F0, 112) \
+ MAP(F1, 113) \
+ MAP(F2, 114) \
+ MAP(F3, 115) \
+ MAP(F4, 116) \
+ MAP(F5, 117) \
+ MAP(F6, 118) \
+ MAP(F7, 119) \
+ MAP(F8, 120) \
+ MAP(F9, 121) \
+ MAP(FA, 122) \
+ MAP(FB, 123) \
+ MAP(FC, 124) \
+ MAP(FD, 125) \
+ MAP(FE, 126) \
+ MAP(FF, 127)
+
+// A clone of X86 since we can't depend on something that is generated.
+namespace X86Local {
+ enum {
+ Pseudo = 0,
+ RawFrm = 1,
+ AddRegFrm = 2,
+ RawFrmMemOffs = 3,
+ RawFrmSrc = 4,
+ RawFrmDst = 5,
+ RawFrmDstSrc = 6,
+ RawFrmImm8 = 7,
+ RawFrmImm16 = 8,
+ MRMDestMem = 32,
+ MRMSrcMem = 33,
+ MRMSrcMem4VOp3 = 34,
+ MRMSrcMemOp4 = 35,
+ MRMXm = 39,
+ MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
+ MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
+ MRMDestReg = 48,
+ MRMSrcReg = 49,
+ MRMSrcReg4VOp3 = 50,
+ MRMSrcRegOp4 = 51,
+ MRMXr = 55,
+ MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
+ MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,
+#define MAP(from, to) MRM_##from = to,
+ X86_INSTR_MRM_MAPPING
+#undef MAP
+ };
+
+ enum {
+ OB = 0, TB = 1, T8 = 2, TA = 3, XOP8 = 4, XOP9 = 5, XOPA = 6
+ };
+
+ enum {
+ PS = 1, PD = 2, XS = 3, XD = 4
+ };
+
+ enum {
+ VEX = 1, XOP = 2, EVEX = 3
+ };
+
+ enum {
+ OpSize16 = 1, OpSize32 = 2
+ };
+
+ enum {
+ AdSize16 = 1, AdSize32 = 2, AdSize64 = 3
+ };
+
+ enum {
+ VEX_W0 = 0, VEX_W1 = 1, VEX_WIG = 2
+ };
+}
+
namespace X86Disassembler {
/// RecognizableInstr - Encapsulates all information required to decode a single
@@ -55,8 +177,8 @@ private:
bool HasREX_WPrefix;
/// The hasVEX_4V field from the record
bool HasVEX_4V;
- /// The hasVEX_WPrefix field from the record
- bool HasVEX_WPrefix;
+ /// The VEX_WPrefix field from the record
+ uint8_t VEX_WPrefix;
/// Inferred from the operands; indicates whether the L bit in the VEX prefix is set
bool HasVEX_LPrefix;
/// The ignoreVEX_L field from the record