Diffstat (limited to 'utils')
-rw-r--r-- | utils/TableGen/ClangAttrEmitter.cpp              |  42
-rw-r--r-- | utils/TableGen/ClangCommentCommandInfoEmitter.cpp |   3
-rw-r--r-- | utils/TableGen/ClangDiagnosticsEmitter.cpp        |   2
-rw-r--r-- | utils/TableGen/NeonEmitter.cpp                    | 572
-rw-r--r-- | utils/TableGen/TableGen.cpp                       |   8
-rw-r--r-- | utils/TableGen/TableGenBackends.h                 |   1
-rw-r--r-- | utils/analyzer/SATestBuild.py                     |   5
7 files changed, 594 insertions, 39 deletions
diff --git a/utils/TableGen/ClangAttrEmitter.cpp b/utils/TableGen/ClangAttrEmitter.cpp
index 7c8603f..eaf10a6 100644
--- a/utils/TableGen/ClangAttrEmitter.cpp
+++ b/utils/TableGen/ClangAttrEmitter.cpp
@@ -971,6 +971,48 @@ void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
   OS << "#endif\n";
 }
 
+// Emits the all-arguments-are-expressions property for attributes.
+void EmitClangAttrExprArgsList(RecordKeeper &Records, raw_ostream &OS) {
+  emitSourceFileHeader("llvm::StringSwitch code to match attributes with "
+                       "expression arguments", OS);
+
+  std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+  for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+       I != E; ++I) {
+    Record &Attr = **I;
+
+    // Determine whether the first argument is something that is always
+    // an expression.
+    std::vector<Record *> Args = Attr.getValueAsListOfDefs("Args");
+    if (Args.empty() || Args[0]->getSuperClasses().empty())
+      continue;
+
+    // Check whether this is one of the argument kinds that implies an
+    // expression.
+    // FIXME: Aligned is weird.
+    if (!llvm::StringSwitch<bool>(Args[0]->getSuperClasses().back()->getName())
+          .Case("AlignedArgument", true)
+          .Case("BoolArgument", true)
+          .Case("DefaultIntArgument", true)
+          .Case("IntArgument", true)
+          .Case("ExprArgument", true)
+          .Case("UnsignedArgument", true)
+          .Case("VariadicUnsignedArgument", true)
+          .Case("VariadicExprArgument", true)
+          .Default(false))
+      continue;
+
+    std::vector<Record*> Spellings = Attr.getValueAsListOfDefs("Spellings");
+
+    for (std::vector<Record*>::const_iterator I = Spellings.begin(),
+         E = Spellings.end(); I != E; ++I) {
+      OS << ".Case(\"" << (*I)->getValueAsString("Name") << "\", "
+         << "true" << ")\n";
+    }
+  }
+}
+
 // Emits the class method definitions for attributes.
 void EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
   emitSourceFileHeader("Attribute classes' member function definitions", OS);
diff --git a/utils/TableGen/ClangCommentCommandInfoEmitter.cpp b/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
index ebb0427..cab1c2b 100644
--- a/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
+++ b/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
@@ -97,6 +97,9 @@ static std::string MangleName(StringRef Str) {
     case '$':
       Mangled += "dollar";
       break;
+    case '/':
+      Mangled += "slash";
+      break;
     }
   }
   return Mangled;
diff --git a/utils/TableGen/ClangDiagnosticsEmitter.cpp b/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 291eb75..da15c93 100644
--- a/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -245,7 +245,7 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
                         SourceMgr::DK_Error,
                         Twine("group '") + Name +
                           "' is referred to anonymously",
-                        ArrayRef<SMRange>(),
+                        None,
                         InGroupRange.isValid() ? FixIt
                                                : ArrayRef<SMFixIt>());
     SrcMgr.PrintMessage((*I)->ExplicitDef->getLoc().front(),
diff --git a/utils/TableGen/NeonEmitter.cpp b/utils/TableGen/NeonEmitter.cpp
index d453ede..34b955e 100644
--- a/utils/TableGen/NeonEmitter.cpp
+++ b/utils/TableGen/NeonEmitter.cpp
@@ -98,7 +98,12 @@ enum ClassKind {
   ClassI,      // generic integer instruction, e.g., "i8" suffix
   ClassS,      // signed/unsigned/poly, e.g., "s8", "u8" or "p8" suffix
   ClassW,      // width-specific instruction, e.g., "8" suffix
-  ClassB       // bitcast arguments with enum argument to specify type
+  ClassB,      // bitcast arguments with enum argument to specify type
+  ClassL,      // Logical instructions which are op instructions
+               // but we need to not emit any suffix for in our
+               // tests.
+  ClassNoTest  // Instructions which we do not test since they are
+               // not TRUE instructions.
 };
 
 /// NeonTypeFlags - Flags to identify the types for overloaded Neon
@@ -204,9 +209,20 @@ public:
     Record *SI = R.getClass("SInst");
     Record *II = R.getClass("IInst");
     Record *WI = R.getClass("WInst");
+    Record *SOpI = R.getClass("SOpInst");
+    Record *IOpI = R.getClass("IOpInst");
+    Record *WOpI = R.getClass("WOpInst");
+    Record *LOpI = R.getClass("LOpInst");
+    Record *NoTestOpI = R.getClass("NoTestOpInst");
+
     ClassMap[SI] = ClassS;
     ClassMap[II] = ClassI;
     ClassMap[WI] = ClassW;
+    ClassMap[SOpI] = ClassS;
+    ClassMap[IOpI] = ClassI;
+    ClassMap[WOpI] = ClassW;
+    ClassMap[LOpI] = ClassL;
+    ClassMap[NoTestOpI] = ClassNoTest;
   }
 
   // run - Emit arm_neon.h.inc
@@ -572,73 +588,89 @@ static std::string BuiltinTypeString(const char mod, StringRef typestr,
     return quad ? "V16Sc" : "V8Sc";
 }
 
-/// MangleName - Append a type or width suffix to a base neon function name,
-/// and insert a 'q' in the appropriate location if the operation works on
-/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
-static std::string MangleName(const std::string &name, StringRef typestr,
-                              ClassKind ck) {
-  if (name == "vcvt_f32_f16")
-    return name;
-
-  bool quad = false;
+/// InstructionTypeCode - Computes the ARM argument character code and
+/// quad status for a specific type string and ClassKind.
+static void InstructionTypeCode(const StringRef &typeStr,
+                                const ClassKind ck,
+                                bool &quad,
+                                std::string &typeCode) {
   bool poly = false;
   bool usgn = false;
-  char type = ClassifyType(typestr, quad, poly, usgn);
-
-  std::string s = name;
+  char type = ClassifyType(typeStr, quad, poly, usgn);
 
   switch (type) {
   case 'c':
     switch (ck) {
-    case ClassS: s += poly ? "_p8" : usgn ? "_u8" : "_s8"; break;
-    case ClassI: s += "_i8"; break;
-    case ClassW: s += "_8"; break;
+    case ClassS: typeCode = poly ? "p8" : usgn ? "u8" : "s8"; break;
+    case ClassI: typeCode = "i8"; break;
+    case ClassW: typeCode = "8"; break;
     default: break;
     }
     break;
   case 's':
     switch (ck) {
-    case ClassS: s += poly ? "_p16" : usgn ? "_u16" : "_s16"; break;
-    case ClassI: s += "_i16"; break;
-    case ClassW: s += "_16"; break;
+    case ClassS: typeCode = poly ? "p16" : usgn ? "u16" : "s16"; break;
+    case ClassI: typeCode = "i16"; break;
+    case ClassW: typeCode = "16"; break;
    default: break;
    }
    break;
  case 'i':
    switch (ck) {
-    case ClassS: s += usgn ? "_u32" : "_s32"; break;
-    case ClassI: s += "_i32"; break;
-    case ClassW: s += "_32"; break;
+    case ClassS: typeCode = usgn ? "u32" : "s32"; break;
+    case ClassI: typeCode = "i32"; break;
+    case ClassW: typeCode = "32"; break;
    default: break;
    }
    break;
  case 'l':
    switch (ck) {
-    case ClassS: s += usgn ? "_u64" : "_s64"; break;
-    case ClassI: s += "_i64"; break;
-    case ClassW: s += "_64"; break;
+    case ClassS: typeCode = usgn ? "u64" : "s64"; break;
+    case ClassI: typeCode = "i64"; break;
+    case ClassW: typeCode = "64"; break;
    default: break;
    }
    break;
  case 'h':
    switch (ck) {
    case ClassS:
-    case ClassI: s += "_f16"; break;
-    case ClassW: s += "_16"; break;
+    case ClassI: typeCode = "f16"; break;
+    case ClassW: typeCode = "16"; break;
    default: break;
    }
    break;
  case 'f':
    switch (ck) {
    case ClassS:
-    case ClassI: s += "_f32"; break;
-    case ClassW: s += "_32"; break;
+    case ClassI: typeCode = "f32"; break;
+    case ClassW: typeCode = "32"; break;
    default: break;
    }
    break;
  default:
    PrintFatalError("unhandled type!");
  }
+}
+
+/// MangleName - Append a type or width suffix to a base neon function name,
+/// and insert a 'q' in the appropriate location if the operation works on
+/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
+static std::string MangleName(const std::string &name, StringRef typestr,
+                              ClassKind ck) {
+  if (name == "vcvt_f32_f16")
+    return name;
+
+  bool quad = false;
+  std::string typeCode = "";
+
+  InstructionTypeCode(typestr, ck, quad, typeCode);
+
+  std::string s = name;
+
+  if (typeCode.size() > 0) {
+    s += "_" + typeCode;
+  }
+
   if (ck == ClassB)
     s += "_v";
@@ -648,9 +680,457 @@ static std::string MangleName(const std::string &name, StringRef typestr,
     size_t pos = s.find('_');
     s = s.insert(pos, "q");
   }
+
   return s;
 }
 
+static void PreprocessInstruction(const StringRef &Name,
+                                  const std::string &InstName,
+                                  std::string &Prefix,
+                                  bool &HasNPostfix,
+                                  bool &HasLanePostfix,
+                                  bool &HasDupPostfix,
+                                  bool &IsSpecialVCvt,
+                                  size_t &TBNumber) {
+  // All of our instruction name fields from arm_neon.td are of the form
+  //   <instructionname>_...
+  // Thus we grab our instruction name via computation of said Prefix.
+  const size_t PrefixEnd = Name.find_first_of('_');
+  // If InstName is passed in, we use that instead of our name Prefix.
+  Prefix = InstName.size() == 0? Name.slice(0, PrefixEnd).str() : InstName;
+
+  const StringRef Postfix = Name.slice(PrefixEnd, Name.size());
+
+  HasNPostfix = Postfix.count("_n");
+  HasLanePostfix = Postfix.count("_lane");
+  HasDupPostfix = Postfix.count("_dup");
+  IsSpecialVCvt = Postfix.size() != 0 && Name.count("vcvt");
+
+  if (InstName.compare("vtbl") == 0 ||
+      InstName.compare("vtbx") == 0) {
+    // If we have a vtblN/vtbxN instruction, use the instruction's ASCII
+    // encoding to get its true value.
+    TBNumber = Name[Name.size()-1] - 48;
+  }
+}
+
+/// GenerateRegisterCheckPatternsForLoadStores - Given a bunch of data we have
+/// extracted, generate a FileCheck pattern for a Load Or Store
+static void
+GenerateRegisterCheckPatternForLoadStores(const StringRef &NameRef,
+                                          const std::string& OutTypeCode,
+                                          const bool &IsQuad,
+                                          const bool &HasDupPostfix,
+                                          const bool &HasLanePostfix,
+                                          const size_t Count,
+                                          std::string &RegisterSuffix) {
+  const bool IsLDSTOne = NameRef.count("vld1") || NameRef.count("vst1");
+  // If N == 3 || N == 4 and we are dealing with a quad instruction, Clang
+  // will output a series of v{ld,st}1s, so we have to handle it specially.
+  if ((Count == 3 || Count == 4) && IsQuad) {
+    RegisterSuffix += "{";
+    for (size_t i = 0; i < Count; i++) {
+      RegisterSuffix += "d{{[0-9]+}}";
+      if (HasDupPostfix) {
+        RegisterSuffix += "[]";
+      }
+      if (HasLanePostfix) {
+        RegisterSuffix += "[{{[0-9]+}}]";
+      }
+      if (i < Count-1) {
+        RegisterSuffix += ", ";
+      }
+    }
+    RegisterSuffix += "}";
+  } else {
+
+    // Handle normal loads and stores.
+    RegisterSuffix += "{";
+    for (size_t i = 0; i < Count; i++) {
+      RegisterSuffix += "d{{[0-9]+}}";
+      if (HasDupPostfix) {
+        RegisterSuffix += "[]";
+      }
+      if (HasLanePostfix) {
+        RegisterSuffix += "[{{[0-9]+}}]";
+      }
+      if (IsQuad && !HasLanePostfix) {
+        RegisterSuffix += ", d{{[0-9]+}}";
+        if (HasDupPostfix) {
+          RegisterSuffix += "[]";
+        }
+      }
+      if (i < Count-1) {
+        RegisterSuffix += ", ";
+      }
+    }
+    RegisterSuffix += "}, [r{{[0-9]+}}";
+
+    // We only include the alignment hint if we have a vld1.*64 or
+    // a dup/lane instruction.
+    if (IsLDSTOne) {
+      if ((HasLanePostfix || HasDupPostfix) && OutTypeCode != "8") {
+        RegisterSuffix += ", :" + OutTypeCode;
+      } else if (OutTypeCode == "64") {
+        RegisterSuffix += ", :64";
+      }
+    }
+
+    RegisterSuffix += "]";
+  }
+}
+
+static bool HasNPostfixAndScalarArgs(const StringRef &NameRef,
+                                     const bool &HasNPostfix) {
+  return (NameRef.count("vmla") ||
+          NameRef.count("vmlal") ||
+          NameRef.count("vmlsl") ||
+          NameRef.count("vmull") ||
+          NameRef.count("vqdmlal") ||
+          NameRef.count("vqdmlsl") ||
+          NameRef.count("vqdmulh") ||
+          NameRef.count("vqdmull") ||
+          NameRef.count("vqrdmulh")) && HasNPostfix;
+}
+
+static bool IsFiveOperandLaneAccumulator(const StringRef &NameRef,
+                                         const bool &HasLanePostfix) {
+  return (NameRef.count("vmla") ||
+          NameRef.count("vmls") ||
+          NameRef.count("vmlal") ||
+          NameRef.count("vmlsl") ||
+          (NameRef.count("vmul") && NameRef.size() == 3)||
+          NameRef.count("vqdmlal") ||
+          NameRef.count("vqdmlsl") ||
+          NameRef.count("vqdmulh") ||
+          NameRef.count("vqrdmulh")) && HasLanePostfix;
+}
+
+static bool IsSpecialLaneMultiply(const StringRef &NameRef,
+                                  const bool &HasLanePostfix,
+                                  const bool &IsQuad) {
+  const bool IsVMulOrMulh = (NameRef.count("vmul") || NameRef.count("mulh"))
+                              && IsQuad;
+  const bool IsVMull = NameRef.count("mull") && !IsQuad;
+  return (IsVMulOrMulh || IsVMull) && HasLanePostfix;
+}
+
+static void NormalizeProtoForRegisterPatternCreation(const std::string &Name,
+                                                     const std::string &Proto,
+                                                     const bool &HasNPostfix,
+                                                     const bool &IsQuad,
+                                                     const bool &HasLanePostfix,
+                                                     const bool &HasDupPostfix,
+                                                     std::string &NormedProto) {
+  // Handle generic case.
+  const StringRef NameRef(Name);
+  for (size_t i = 0, end = Proto.size(); i < end; i++) {
+    switch (Proto[i]) {
+    case 'u':
+    case 'f':
+    case 'd':
+    case 's':
+    case 'x':
+    case 't':
+    case 'n':
+      NormedProto += IsQuad? 'q' : 'd';
+      break;
+    case 'w':
+    case 'k':
+      NormedProto += 'q';
+      break;
+    case 'g':
+    case 'h':
+    case 'e':
+      NormedProto += 'd';
+      break;
+    case 'i':
+      NormedProto += HasLanePostfix? 'a' : 'i';
+      break;
+    case 'a':
+      if (HasLanePostfix) {
+        NormedProto += 'a';
+      } else if (HasNPostfixAndScalarArgs(NameRef, HasNPostfix)) {
+        NormedProto += IsQuad? 'q' : 'd';
+      } else {
+        NormedProto += 'i';
+      }
+      break;
+    }
+  }
+
+  // Handle Special Cases.
+  const bool IsNotVExt = !NameRef.count("vext");
+  const bool IsVPADAL = NameRef.count("vpadal");
+  const bool Is5OpLaneAccum = IsFiveOperandLaneAccumulator(NameRef,
+                                                           HasLanePostfix);
+  const bool IsSpecialLaneMul = IsSpecialLaneMultiply(NameRef, HasLanePostfix,
+                                                      IsQuad);
+
+  if (IsSpecialLaneMul) {
+    // If
+    NormedProto[2] = NormedProto[3];
+    NormedProto.erase(3);
+  } else if (NormedProto.size() == 4 &&
+             NormedProto[0] == NormedProto[1] &&
+             IsNotVExt) {
+    // If NormedProto.size() == 4 and the first two proto characters are the
+    // same, ignore the first.
+    NormedProto = NormedProto.substr(1, 3);
+  } else if (Is5OpLaneAccum) {
+    // If we have a 5 op lane accumulator operation, we take characters 1,2,4
+    std::string tmp = NormedProto.substr(1,2);
+    tmp += NormedProto[4];
+    NormedProto = tmp;
+  } else if (IsVPADAL) {
+    // If we have VPADAL, ignore the first character.
+    NormedProto = NormedProto.substr(0, 2);
+  } else if (NameRef.count("vdup") && NormedProto.size() > 2) {
+    // If our instruction is a dup instruction, keep only the first and
+    // last characters.
+    std::string tmp = "";
+    tmp += NormedProto[0];
+    tmp += NormedProto[NormedProto.size()-1];
+    NormedProto = tmp;
+  }
+}
+
+/// GenerateRegisterCheckPatterns - Given a bunch of data we have
+/// extracted, generate a FileCheck pattern to check that an
+/// instruction's arguments are correct.
+static void GenerateRegisterCheckPattern(const std::string &Name,
+                                         const std::string &Proto,
+                                         const std::string &OutTypeCode,
+                                         const bool &HasNPostfix,
+                                         const bool &IsQuad,
+                                         const bool &HasLanePostfix,
+                                         const bool &HasDupPostfix,
+                                         const size_t &TBNumber,
+                                         std::string &RegisterSuffix) {
+
+  RegisterSuffix = "";
+
+  const StringRef NameRef(Name);
+  const StringRef ProtoRef(Proto);
+
+  if ((NameRef.count("vdup") || NameRef.count("vmov")) && HasNPostfix) {
+    return;
+  }
+
+  const bool IsLoadStore = NameRef.count("vld") || NameRef.count("vst");
+  const bool IsTBXOrTBL = NameRef.count("vtbl") || NameRef.count("vtbx");
+
+  if (IsLoadStore) {
+    // Grab N value from v{ld,st}N using its ascii representation.
+    const size_t Count = NameRef[3] - 48;
+
+    GenerateRegisterCheckPatternForLoadStores(NameRef, OutTypeCode, IsQuad,
+                                              HasDupPostfix, HasLanePostfix,
+                                              Count, RegisterSuffix);
+  } else if (IsTBXOrTBL) {
+    RegisterSuffix += "d{{[0-9]+}}, {";
+    for (size_t i = 0; i < TBNumber-1; i++) {
+      RegisterSuffix += "d{{[0-9]+}}, ";
+    }
+    RegisterSuffix += "d{{[0-9]+}}}, d{{[0-9]+}}";
+  } else {
+    // Handle a normal instruction.
+    if (NameRef.count("vget") || NameRef.count("vset"))
+      return;
+
+    // We first normalize our proto, since we only need to emit 4
+    // different types of checks, yet have more than 4 proto types
+    // that map onto those 4 patterns.
+    std::string NormalizedProto("");
+    NormalizeProtoForRegisterPatternCreation(Name, Proto, HasNPostfix, IsQuad,
+                                             HasLanePostfix, HasDupPostfix,
+                                             NormalizedProto);
+
+    for (size_t i = 0, end = NormalizedProto.size(); i < end; i++) {
+      const char &c = NormalizedProto[i];
+      switch (c) {
+      case 'q':
+        RegisterSuffix += "q{{[0-9]+}}, ";
+        break;
+
+      case 'd':
+        RegisterSuffix += "d{{[0-9]+}}, ";
+        break;
+
+      case 'i':
+        RegisterSuffix += "#{{[0-9]+}}, ";
+        break;
+
+      case 'a':
+        RegisterSuffix += "d{{[0-9]+}}[{{[0-9]}}], ";
+        break;
+      }
+    }
+
+    // Remove extra ", ".
+    RegisterSuffix = RegisterSuffix.substr(0, RegisterSuffix.size()-2);
+  }
+}
+
+/// GenerateChecksForIntrinsic - Given a specific instruction name +
+/// typestr + class kind, generate the proper set of FileCheck
+/// Patterns to check for. We could just return a string, but instead
+/// use a vector since it provides us with the extra flexibility of
+/// emitting multiple checks, which comes in handy for certain cases
+/// like mla where we want to check for 2 different instructions.
+static void GenerateChecksForIntrinsic(const std::string &Name,
+                                       const std::string &Proto,
+                                       StringRef &OutTypeStr,
+                                       StringRef &InTypeStr,
+                                       ClassKind Ck,
+                                       const std::string &InstName,
+                                       bool IsHiddenLOp,
+                                       std::vector<std::string>& Result) {
+
+  // If Ck is a ClassNoTest instruction, just return so no test is
+  // emitted.
+  if(Ck == ClassNoTest)
+    return;
+
+  if (Name == "vcvt_f32_f16") {
+    Result.push_back("vcvt.f32.f16");
+    return;
+  }
+
+
+  // Now we preprocess our instruction given the data we have to get the
+  // data that we need.
+  // Create a StringRef for String Manipulation of our Name.
+  const StringRef NameRef(Name);
+  // Instruction Prefix.
+  std::string Prefix;
+  // The type code for our out type string.
+  std::string OutTypeCode;
+  // To handle our different cases, we need to check for different postfixes.
+  // Is our instruction a quad instruction.
+  bool IsQuad = false;
+  // Our instruction is of the form <instructionname>_n.
+  bool HasNPostfix = false;
+  // Our instruction is of the form <instructionname>_lane.
+  bool HasLanePostfix = false;
+  // Our instruction is of the form <instructionname>_dup.
+  bool HasDupPostfix = false;
+  // Our instruction is a vcvt instruction which requires special handling.
+  bool IsSpecialVCvt = false;
+  // If we have a vtbxN or vtblN instruction, this is set to N.
+  size_t TBNumber = -1;
+  // Register Suffix
+  std::string RegisterSuffix;
+
+  PreprocessInstruction(NameRef, InstName, Prefix,
+                        HasNPostfix, HasLanePostfix, HasDupPostfix,
+                        IsSpecialVCvt, TBNumber);
+
+  InstructionTypeCode(OutTypeStr, Ck, IsQuad, OutTypeCode);
+  GenerateRegisterCheckPattern(Name, Proto, OutTypeCode, HasNPostfix, IsQuad,
+                               HasLanePostfix, HasDupPostfix, TBNumber,
+                               RegisterSuffix);
+
+  // In the following section, we handle a bunch of special cases. You can tell
+  // a special case by the fact we are returning early.
+
+  // If our instruction is a logical instruction without postfix or a
+  // hidden LOp just return the current Prefix.
+  if (Ck == ClassL || IsHiddenLOp) {
+    Result.push_back(Prefix + " " + RegisterSuffix);
+    return;
+  }
+
+  // If we have a vmov, due to the many different cases, some of which
+  // vary within the different intrinsics generated for a single
+  // instruction type, just output a vmov. (e.g. given an instruction
+  // A, A.u32 might be vmov and A.u8 might be vmov.8).
+  //
+  // FIXME: Maybe something can be done about this. The two cases that we care
+  // about are vmov as an LType and vmov as a WType.
+  if (Prefix == "vmov") {
+    Result.push_back(Prefix + " " + RegisterSuffix);
+    return;
+  }
+
+  // In the following section, we handle special cases.
+
+  if (OutTypeCode == "64") {
+    // If we have a 64 bit vdup/vext and are handling an uint64x1_t
+    // type, the intrinsic will be optimized away, so just return
+    // nothing. On the other hand if we are handling an uint64x2_t
+    // (i.e. quad instruction), vdup/vmov instructions should be
+    // emitted.
+    if (Prefix == "vdup" || Prefix == "vext") {
+      if (IsQuad) {
+        Result.push_back("{{vmov|vdup}}");
+      }
+      return;
+    }
+
+    // v{st,ld}{2,3,4}_{u,s}64 emit v{st,ld}1.64 instructions with
+    // multiple register operands.
+    bool MultiLoadPrefix = Prefix == "vld2" || Prefix == "vld3"
+                            || Prefix == "vld4";
+    bool MultiStorePrefix = Prefix == "vst2" || Prefix == "vst3"
+                            || Prefix == "vst4";
+    if (MultiLoadPrefix || MultiStorePrefix) {
+      Result.push_back(NameRef.slice(0, 3).str() + "1.64");
+      return;
+    }
+
+    // v{st,ld}1_{lane,dup}_{u64,s64} use vldr/vstr/vmov/str instead of
+    // emitting said instructions. So return a check for
+    // vldr/vstr/vmov/str instead.
+    if (HasLanePostfix || HasDupPostfix) {
+      if (Prefix == "vst1") {
+        Result.push_back("{{str|vstr|vmov}}");
+        return;
+      } else if (Prefix == "vld1") {
+        Result.push_back("{{ldr|vldr|vmov}}");
+        return;
+      }
+    }
+  }
+
+  // vzip.32/vuzp.32 are the same instruction as vtrn.32 and are
+  // sometimes disassembled as vtrn.32. We use a regex to handle both
+  // cases.
+  if ((Prefix == "vzip" || Prefix == "vuzp") && OutTypeCode == "32") {
+    Result.push_back("{{vtrn|" + Prefix + "}}.32 " + RegisterSuffix);
+    return;
+  }
+
+  // Currently on most ARM processors, we do not use vmla/vmls for
+  // quad floating point operations. Instead we output vmul + vadd. So
+  // check if we have one of those instructions and just output a
+  // check for vmul.
+  if (OutTypeCode == "f32") {
+    if (Prefix == "vmls") {
+      Result.push_back("vmul." + OutTypeCode + " " + RegisterSuffix);
+      Result.push_back("vsub." + OutTypeCode);
+      return;
+    } else if (Prefix == "vmla") {
+      Result.push_back("vmul." + OutTypeCode + " " + RegisterSuffix);
+      Result.push_back("vadd." + OutTypeCode);
+      return;
+    }
+  }
+
+  // If we have vcvt, get the input type from the instruction name
+  // (which should be of the form instname_inputtype) and append it
+  // before the output type.
+  if (Prefix == "vcvt") {
+    const std::string inTypeCode = NameRef.substr(NameRef.find_last_of("_")+1);
+    Prefix += "." + inTypeCode;
+  }
+
+  // Append output type code to get our final mangled instruction.
+  Prefix += "." + OutTypeCode;
+
+  Result.push_back(Prefix + " " + RegisterSuffix);
+}
+
 /// UseMacro - Examine the prototype string to determine if the intrinsic
 /// should be defined as a preprocessor macro instead of an inline function.
 static bool UseMacro(const std::string &proto) {
@@ -1342,7 +1822,7 @@ void NeonEmitter::run(raw_ostream &OS) {
     }
   }
 
-  OS<<"#define __ai static __attribute__((__always_inline__, __nodebug__))\n\n";
+  OS<<"#define __ai static inline __attribute__((__always_inline__, __nodebug__))\n\n";
 
   std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
@@ -1668,7 +2148,8 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
 static std::string GenTest(const std::string &name,
                            const std::string &proto,
                            StringRef outTypeStr, StringRef inTypeStr,
-                           bool isShift) {
+                           bool isShift, bool isHiddenLOp,
+                           ClassKind ck, const std::string &InstName) {
   assert(!proto.empty() && "");
   std::string s;
@@ -1683,9 +2164,22 @@ static std::string GenTest(const std::string &name,
     mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
   }
 
+  std::vector<std::string> FileCheckPatterns;
+  GenerateChecksForIntrinsic(name, proto, outTypeStr, inTypeStr, ck, InstName,
+                             isHiddenLOp, FileCheckPatterns);
+
   // Emit the FileCheck patterns.
   s += "// CHECK: test_" + mangledName + "\n";
-  // s += "// CHECK: \n"; // FIXME: + expected instruction opcode.
+  // If for any reason we do not want to emit a check, mangledInst
+  // will be the empty string.
+  if (FileCheckPatterns.size()) {
+    for (std::vector<std::string>::const_iterator i = FileCheckPatterns.begin(),
+                                                  e = FileCheckPatterns.end();
+         i != e;
+         ++i) {
+      s += "// CHECK: " + *i + "\n";
+    }
+  }
 
   // Emit the start of the test function.
   s += TypeString(proto[0], outTypeStr) + " test_" + mangledName + "(";
@@ -1727,8 +2221,9 @@ static std::string GenTest(const std::string &name,
 /// intrinsics.
 void NeonEmitter::runTests(raw_ostream &OS) {
   OS <<
-    "// RUN: %clang_cc1 -triple thumbv7-apple-darwin \\\n"
-    "// RUN: -target-cpu cortex-a9 -ffreestanding -S -o - %s | FileCheck %s\n"
+    "// RUN: %clang_cc1 -triple thumbv7s-apple-darwin -target-abi apcs-gnu\\\n"
+    "// RUN: -target-cpu swift -ffreestanding -Os -S -o - %s\\\n"
+    "// RUN: | FileCheck %s\n"
     "\n"
    "#include <arm_neon.h>\n"
    "\n";
@@ -1740,10 +2235,13 @@ void NeonEmitter::runTests(raw_ostream &OS) {
     std::string Proto = R->getValueAsString("Prototype");
     std::string Types = R->getValueAsString("Types");
     bool isShift = R->getValueAsBit("isShift");
+    std::string InstName = R->getValueAsString("InstName");
+    bool isHiddenLOp = R->getValueAsBit("isHiddenLInst");
 
     SmallVector<StringRef, 16> TypeVec;
     ParseTypes(R, Types, TypeVec);
 
+    ClassKind ck = ClassMap[R->getSuperClasses()[1]];
     OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
     if (kind == OpUnavailable)
       continue;
@@ -1758,10 +2256,12 @@ void NeonEmitter::runTests(raw_ostream &OS) {
           (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
           if (srcti == ti || inQuad != outQuad)
             continue;
-          OS << GenTest(name, Proto, TypeVec[ti], TypeVec[srcti], isShift);
+          OS << GenTest(name, Proto, TypeVec[ti], TypeVec[srcti],
+                        isShift, isHiddenLOp, ck, InstName);
         }
       } else {
-        OS << GenTest(name, Proto, TypeVec[ti], TypeVec[ti], isShift);
+        OS << GenTest(name, Proto, TypeVec[ti], TypeVec[ti],
+                      isShift, isHiddenLOp, ck, InstName);
       }
     }
     OS << "\n";
diff --git a/utils/TableGen/TableGen.cpp b/utils/TableGen/TableGen.cpp
index 3df8940..12e1c47 100644
--- a/utils/TableGen/TableGen.cpp
+++ b/utils/TableGen/TableGen.cpp
@@ -24,6 +24,7 @@ using namespace clang;
 
 enum ActionType {
   GenClangAttrClasses,
+  GenClangAttrExprArgsList,
   GenClangAttrImpl,
   GenClangAttrList,
   GenClangAttrPCHRead,
@@ -62,6 +63,10 @@ namespace {
                                "Generate option parser implementation"),
                     clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes",
                                "Generate clang attribute clases"),
+                    clEnumValN(GenClangAttrExprArgsList,
+                               "gen-clang-attr-expr-args-list",
+                               "Generate a clang attribute expression "
+                               "arguments list"),
                     clEnumValN(GenClangAttrImpl, "gen-clang-attr-impl",
                                "Generate clang attribute implementations"),
                     clEnumValN(GenClangAttrList, "gen-clang-attr-list",
@@ -143,6 +148,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
   case GenClangAttrClasses:
     EmitClangAttrClass(Records, OS);
     break;
+  case GenClangAttrExprArgsList:
+    EmitClangAttrExprArgsList(Records, OS);
+    break;
   case GenClangAttrImpl:
     EmitClangAttrImpl(Records, OS);
     break;
diff --git a/utils/TableGen/TableGenBackends.h b/utils/TableGen/TableGenBackends.h
index 03708b6..0ff33d7 100644
--- a/utils/TableGen/TableGenBackends.h
+++ b/utils/TableGen/TableGenBackends.h
@@ -30,6 +30,7 @@ void EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS,
                        const std::string &N, const std::string &S);
 
 void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrExprArgsList(RecordKeeper &Records, raw_ostream &OS);
 void EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS);
 void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS);
 void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS);
diff --git a/utils/analyzer/SATestBuild.py b/utils/analyzer/SATestBuild.py
index 067be16..e119155 100644
--- a/utils/analyzer/SATestBuild.py
+++ b/utils/analyzer/SATestBuild.py
@@ -168,8 +168,9 @@ SBOutputDirName = "ScanBuildResults"
 SBOutputDirReferencePrefix = "Ref"
 
 # The list of checkers used during analyzes.
-# Currently, consists of all the non experimental checkers.
-Checkers="alpha.unix.SimpleStream,alpha.security.taint,core,deadcode,security,unix,osx"
+# Currently, consists of all the non experimental checkers, plus a few alpha
+# checkers we don't want to regress on.
+Checkers="alpha.unix.SimpleStream,alpha.security.taint,alpha.cplusplus.NewDeleteLeaks,core,cplusplus,deadcode,security,unix,osx"
 
 Verbose = 1
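
For reference, a minimal sketch of the kind of test file the updated GenTest/runTests path is intended to emit. The RUN lines are copied from the diff above; the specific intrinsic, its class mapping, the expected vadd.i32 opcode, and the exact register patterns are illustrative assumptions rather than captured generator output.

// RUN: %clang_cc1 -triple thumbv7s-apple-darwin -target-abi apcs-gnu\
// RUN: -target-cpu swift -ffreestanding -Os -S -o - %s\
// RUN: | FileCheck %s

#include <arm_neon.h>

// Assuming vadd maps to ClassI, InstructionTypeCode yields the "i32" type
// code; with a quad type, the "ddd" prototype normalizes to three q registers
// in the register-check pattern.
// CHECK: test_vaddq_s32
// CHECK: vadd.i32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
int32x4_t test_vaddq_s32(int32x4_t a, int32x4_t b) {
  return vaddq_s32(a, b);
}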